repo_name
stringlengths
6
77
path
stringlengths
8
215
license
stringclasses
15 values
content
stringlengths
335
154k
AllenDowney/ThinkBayes2
examples/geiger_soln.ipynb
mit
# Configure Jupyter so figures appear in the notebook %matplotlib inline # Configure Jupyter to display the assigned value after an assignment %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' import numpy as np import pandas as pd # import classes from thinkbayes2 from thinkbayes2 import Pmf, Cdf, Suite, Joint from thinkbayes2 import MakePoissonPmf, EvalBinomialPmf, MakeMixture import thinkplot """ Explanation: Think Bayes Copyright 2018 Allen B. Downey MIT License: https://opensource.org/licenses/MIT End of explanation """ class Logistic(Suite, Joint): def Likelihood(self, data, hypo): """ data: k, number of particles detected hypo: r, emission rate in particles per second """ return 1 r = 160 k = 15 f = 0.1 pmf = MakePoissonPmf(r, high=500) thinkplot.Hist(pmf) total = 0 for n, p in pmf.Items(): total += p * EvalBinomialPmf(k, n, f) total def compute_likelihood(k, r, f): pmf = MakePoissonPmf(r, high=500) total = 0 for n, p in pmf.Items(): total += p * EvalBinomialPmf(k, n, f) return total compute_likelihood(k, r, f) likes = pd.Series([]) for kk in range(0, 40): likes[kk] = compute_likelihood(kk, r, f) likes.plot() thinkplot.decorate(xlabel='Counter particles (n)', ylabel='PMF') # Solution class Logistic(Suite, Joint): f = 0.1 def Likelihood(self, data, hypo): """ data: k, number of particles detected hypo: r, emission rate in particles per second """ k = data r = hypo return compute_likelihood(k, r, self.f) rs = np.linspace(0, 300, 51); suite = Logistic(rs); suite.Update(15) thinkplot.Pdf(suite) thinkplot.decorate(xlabel='Emission rate (particles/second)', ylabel='PMF', title='Posterior marginal distribution') """ Explanation: The Geiger counter problem I got the idea for the following problem from Tom Campbell-Ricketts, author of the Maximum Entropy blog. And he got the idea from E. T. 
Jaynes, author of the classic Probability Theory: The Logic of Science: Suppose that a radioactive source emits particles toward a Geiger counter at an average rate of r particles per second, but the counter only registers a fraction, f, of the particles that hit it. If f is 10% and the counter registers 15 particles in a one second interval, what is the posterior distribution of n, the actual number of particles that hit the counter, and r, the average rate particles are emitted? Grid algorithm End of explanation """ import pymc3 as pm # Solution f = 0.1 model = pm.Model() with model: r = pm.Uniform('r', 0, 500) n = pm.Poisson('n', r) k = pm.Binomial('k', n, f, observed=15) trace = pm.sample_prior_predictive(1000) thinkplot.Cdf(Cdf(trace['r'])); thinkplot.Cdf(Cdf(trace['n'])); thinkplot.Cdf(Cdf(trace['k'])); with model: trace = pm.sample(1000, tune=3000) pm.traceplot(trace); n_sample = trace['n'] thinkplot.Cdf(Cdf(n_sample)) r_sample = trace['r'] thinkplot.Cdf(Cdf(r_sample)) thinkplot.Cdf(suite.MakeCdf()) thinkplot.Cdf(Cdf(r_sample)) """ Explanation: MCMC Implement this model using MCMC. As a starting place, you can use this example from the PyMC3 docs. As a challege, try writing the model more explicitly, rather than using the GLM module. 
End of explanation """ # Solution class Logistic(Suite, Joint): f = 0.1 def Likelihood(self, data, hypo): """ data: k, number of particles detected hypo: r, n """ k = data r, n = hypo return EvalBinomialPmf(k, n, self.f) rs = np.linspace(0, 300, 51); suite = Logistic() for r in rs: pmf = MakePoissonPmf(r, high=500) for n, p in pmf.Items(): suite[r, n] += p suite.Normalize() suite.Update(15) pmf_r = suite.Marginal(0) thinkplot.Pdf(pmf_r) thinkplot.decorate(xlabel='Emission rate (particles/second)', ylabel='PMF', title='Posterior marginal distribution') pmf_n = suite.Marginal(1) thinkplot.Pdf(pmf_n) thinkplot.decorate(xlabel='Number of particles (n)', ylabel='PMF', title='Posterior marginal distribution') """ Explanation: Grid algorithm, version 2 End of explanation """ class Detector(Suite): """Represents hypotheses about n.""" def __init__(self, r, f, high=500): """Initializes the suite. r: known emission rate, r f: fraction of particles registered high: maximum number of particles, n """ pmf = MakePoissonPmf(r, high) super().__init__(pmf) self.r = r self.f = f def Likelihood(self, data, hypo): """Likelihood of the data given the hypothesis. data: number of particles counted hypo: number of particles hitting the counter, n """ k = data n = hypo return EvalBinomialPmf(k, n, self.f) r = 160 k = 15 f = 0.1 suite = Detector(r, f); suite.Update(15) class Emitter(Suite): """Represents hypotheses about r.""" def Likelihood(self, data, hypo): """Likelihood of the data given the hypothesis. data: number of counted per unit time hypo: Detector object """ return hypo.Update(data) rs = np.linspace(0, 300, 51); detectors = [Detector(r, f=0.1) for r in rs[1:]] suite = Emitter(detectors); suite.Update(15) pmf_r = Pmf() for detector, p in suite.Items(): pmf_r[detector.r] = p thinkplot.Pdf(pmf_r) mix = MakeMixture(suite); thinkplot.Pdf(mix) """ Explanation: Hierarchical version, as in the book End of explanation """
arongdari/almc
notebooks/freebase_subset_selector.ipynb
gpl-2.0
datafile = '../data/freebase/train_single_relation.txt' entities = set() relations = set() with open(datafile, 'r') as f: for line in f.readlines(): start, relation, end = line.split('\t') if start.strip() not in entities: entities.add(start.strip()) if end.strip() not in entities: entities.add(end.strip()) if relation.strip() not in relations: relations.add(relation) n_entities = len(entities) entities = list(entities) entity_dic = {entities[k]:k for k in range(len(entities))} n_relations = len(relations) relations = list(relations) relation_dic = {relations[k]:k for k in range(len(relations))} selected_relations = list() #manually selected list of relations selected_relations.append(relation_dic['place_of_birth']) selected_relations.append(relation_dic['place_of_death']) selected_relations.append(relation_dic['nationality']) selected_relations.append(relation_dic['location']) entity_count = np.zeros(n_entities) T = [lil_matrix((n_entities, n_entities), dtype=int) for k in range(n_relations)] cnt = 0 with open(datafile, 'r') as f: for line in f.readlines(): start, relation, end = line.split('\t') e_i = entity_dic[start.strip()] e_j = entity_dic[end.strip()] r_k = relation_dic[relation.strip()] T[r_k][e_i,e_j] = 1 if r_k in selected_relations: if e_i == e_j: entity_count[e_i] += 1 else: entity_count[e_i] += 1 entity_count[e_j] += 1 T = [X.tocsr() for X in T] entities = np.array(entities) relations = np.array(relations) plt.figure(figsize=(8,6)) plt.bar(range(n_relations), [T[k].nnz for k in range(n_relations)]) plt.xticks(np.arange(0.5, n_relations), relations, rotation='vertical') plt.title('Number of triples for each relation') print('num entity', n_entities) print('num triples', np.sum([T[k].nnz for k in range(n_relations)])) print('sparsity', np.sum([T[k].nnz for k in range(n_relations)])/(n_relations * n_entities**2)) """ Explanation: with open('train_single_relation.txt', 'w') as fw: with open('train', 'r') as f: for line in f.readlines(): start, relations, 
end = line.split('\t') if ',' not in relations: fw.write(line) End of explanation """ newT, entities, relations = pickle.load(open('../data/freebase/subset_3000.pkl', 'rb')) plt.figure(figsize=(8,6)) n_relations = len(relations) plt.bar(range(n_relations), [np.sum(newT[k]) for k in range(n_relations)]) plt.xticks(np.arange(0.5, n_relations), relations, rotation='vertical') plt.title('Number of triples for each relation') """ Explanation: Plot from dump End of explanation """
PiercingDan/mat245
Labs/Lab5/lab5_assignment.ipynb
mit
from sklearn import datasets bost = datasets.load_boston() bost.keys() bost.data.shape """ Explanation: MAT245 Lab 5 - Linear Regression Overview Regression analysis is a set of statistical techniques for modelling the relationships between a dependent variable and a set of independent (or predictor) variables. Linear regression in particular assumes these relationships are linear. We are going to explore a few different ways of finding "optimal" parameters for a linear regression model. Loading the data We will be working with a subset of scikit-learn's Boston housing dataset. The goal is to construct a linear regression model that predicts the price of a house given a few metrics about the neighbourhood it's in. The Boston data can be loaded by: End of explanation """ xs = bost.data[:, [2, 12]] """ Explanation: There are thirteen columns of data, and 506 samples. A description of what each column means can be found by investigating bost['DESCR']. For visualization purposes, we'll work with a 2 dimensional subset: End of explanation """ from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt fig = plt.figure(figsize=(10,10)) for i in range(1, 10): ax = fig.add_subplot(3, 3, i, projection='3d') ax.view_init(elev=10., azim = (360.0 / 9.) * (i - 1)) ax.scatter(xs[:,0], xs[:,1], bost.target) plt.show() """ Explanation: The data in column index 2 represents proportion of non-retail business acres in the town, while column index 12 is the median value of owner-occupied homes in the neighbourhood, priced in $1000's. The target variable (i.e. what we want to predict) are the house prices, these are stored in bost.target. Let's plot house prices as a function of our inputs to see what we're dealing with. End of explanation """
Salman-H/mars-search-robot
.ipynb_checkpoints/Rover_Project_Test_Notebook-checkpoint.ipynb
bsd-2-clause
#%%HTML #<style> code {background-color : orange !important;} </style> %matplotlib inline #%matplotlib qt # Choose %matplotlib qt to plot to an interactive window import cv2 # OpenCV for perspective transform import numpy as np import matplotlib.image as mpimg import matplotlib.pyplot as plt import scipy.misc # For saving images as needed import glob # For reading in a list of images from a folder """ Explanation: Rover Project Test Notebook This notebook contains the functions that provide the scaffolding needed to test out mapping methods. The following steps are taken to test functions and calibrate data for the project: The simulator is run in "Training Mode" and some data is recorded. Note: the simulator may crash if a large (longer than a few minutes) dataset is recorded; only a small data sample is required i.e. just some example images to work with. The functions are tested with the data. Functions are written and modified to report and map out detections of obstacles and rock samples (yellow rocks). process_image() function is populated with the appropriate steps/functions to go from a raw image to a worldmap. moviepy functions are used to construct a video output from processed image data. Once it is confirmed that mapping is working, perception.py and decision.py are modified to allow the rover to navigate and map in autonomous mode! Note: If, at any point, display windows freeze up or other confounding issues are encountered, Kernel should be restarted and output cleared from the "Kernel" menu above. Uncomment and run the next cell to get code highlighting in the markdown cells. End of explanation """ path = '../test_dataset/IMG/*' img_list = glob.glob(path) # Grab a random image and display it idx = np.random.randint(0, len(img_list)-1) image = mpimg.imread(img_list[idx]) plt.imshow(image) """ Explanation: Quick Look at the Data There's some example data provided in the test_dataset folder. 
This basic dataset is enough to get you up and running but if you want to hone your methods more carefully you should record some data of your own to sample various scenarios in the simulator. Next, read in and display a random image from the test_dataset folder End of explanation """ # In the simulator the grid on the ground can be toggled on for calibration. # The rock samples can be toggled on with the 0 (zero) key. # Here's an example of the grid and one of the rocks example_grid = '../calibration_images/example_grid1.jpg' example_rock = '../calibration_images/example_rock1.jpg' example_rock2 = '../calibration_images/example_rock2.jpg' grid_img = mpimg.imread(example_grid) rock_img = mpimg.imread(example_rock) rock_img2 = mpimg.imread(example_rock2) fig = plt.figure(figsize=(12,3)) plt.subplot(131) plt.imshow(grid_img) plt.subplot(132) plt.imshow(rock_img) plt.subplot(133) plt.imshow(rock_img2) """ Explanation: Calibration Data Read in and display example grid and rock sample calibration images. The grid is used for perspective transform and the rock image for creating a new color selection that identifies these samples of interest. End of explanation """ def perspect_transform(input_img, sourc_pts, destn_pts): """ Apply a perspective transformation to input 3D image. 
Keyword arguments: input_img -- 3D numpy image on which perspective transform is applied sourc_pts -- numpy array of four source coordinates on input 3D image destn_pts -- corresponding destination coordinates on output 2D image Return value: output_img -- 2D numpy image with overhead view """ transform_matrix = cv2.getPerspectiveTransform( sourc_pts, destn_pts ) output_img = cv2.warpPerspective( input_img, transform_matrix, (input_img.shape[1], input_img.shape[0]) # keep same size as input_img ) return output_img # Define calibration box in source (actual) and destination (desired) # coordinates to warp the image to a grid where each 10x10 pixel square # represents 1 square meter and the destination box will be 2*dst_size # on each side dst_size = 5 # Set a bottom offset (rough estimate) to account for the fact that the # bottom of the image is not the position of the rover but a bit in front # of it bottom_offset = 6 source = np.float32( [[14, 140], [301, 140], [200, 96], [118, 96]] ) destination = np.float32( [ [image.shape[1]/2 - dst_size, image.shape[0] - bottom_offset], [image.shape[1]/2 + dst_size, image.shape[0] - bottom_offset], [image.shape[1]/2 + dst_size, image.shape[0] - 2*dst_size - bottom_offset], [image.shape[1]/2 - dst_size, image.shape[0] - 2*dst_size - bottom_offset] ] ) warped = perspect_transform(grid_img, source, destination) plt.imshow(warped) # scipy.misc.imsave('../output/warped_example.jpg', warped) warped_rock = perspect_transform(rock_img, source, destination) warped_rock2 = perspect_transform(rock_img2, source, destination) fig = plt.figure(figsize=(16,7)) plt.subplot(221) plt.imshow(rock_img) plt.subplot(222) plt.imshow(rock_img2) plt.subplot(223) plt.imshow(warped_rock) plt.subplot(224) plt.imshow(warped_rock2) rock1_pixels = np.copy(rock_img) plt.imshow(rock1_pixels[90:112,150:172]) """ Explanation: Perspective Transform Define the perspective transform function and test it on an image. 
Four source points are selected which represent a 1 square meter grid in the image viewed from the rover's front camera. These source points are subsequently mapped to four corresponding grid cell points in our "warped" image such that a grid cell in it is 10x10 pixels viewed from top-down. Thus, the front_cam image is said to be warped into a top-down view image by the perspective transformation. The example grid image above is used to choose source points for the grid cell which is in front of the rover (each grid cell is 1 square meter in the sim). The source and destination points are defined to warp the image to a grid where each 10x10 pixel square represents 1 square meter. The following steps are used to warp an image using a perspective transform: Define 4 source points, in this case, the 4 corners of a grid cell in the front camera image above. Define 4 destination points (must be listed in the same order as source points!). Use cv2.getPerspectiveTransform() to get M, the transform matrix. Use cv2.warpPerspective() to apply M and warp front camera image to a top-down view. Refer to the following documentation for geometric transformations in OpenCV: http://docs.opencv.org/trunk/da/d6e/tutorial_py_geometric_transformations.html End of explanation """ def color_thresh_nav(input_img, rgb_thresh=(160, 160, 160)): """ Apply a color threshold to extract only ground terrain pixels. 
Keyword arguments: input_img -- numpy image on which RGB threshold is applied rgb_thresh -- RGB thresh tuple above which only ground pixels are detected Return value: nav_img -- binary image identifying ground/navigable terrain pixels """ # Create an array of zeros same xy size as input_img, but single channel nav_img = np.zeros_like(input_img[:, :, 0]) # Require that each of the R(0), G(1), B(2) pixels be above all three # rgb_thresh values such that pix_above_thresh will now contain a # boolean array with "True" where threshold was met pix_above_thresh = ( (input_img[:, :, 0] > rgb_thresh[0]) & (input_img[:, :, 1] > rgb_thresh[1]) & (input_img[:, :, 2] > rgb_thresh[2]) ) # Index the array of zeros with the boolean array and set to 1 (white) # those pixels that are above rgb_thresh for ground/navigable terrain nav_img[pix_above_thresh] = 1 # nav_img will now contain white pixels identifying navigable terrain return nav_img threshed = color_thresh_nav(warped) plt.imshow(threshed, cmap='gray') #scipy.misc.imsave('../output/warped_threshed.jpg', threshed*255) """ Explanation: Color Thresholding Define the color thresholding function for navigable terrain and apply it to the warped image. Ultimately, the map not only includes navigable terrain but also obstacles and the positions of the rock samples we're searching for. New functions are needed to return the pixel locations of obstacles (areas below the threshold) and rock samples (yellow rocks in calibration images), such that these areas can be mapped into world coordinates as well. Color thresholding for navigable terrain End of explanation """ def color_thresh_obst(input_img, rgb_thresh=(160, 160, 160)): """ Apply a color threshold to extract only mountain rock pixels. 
Keyword arguments: input_img -- numpy image on which RGB threshold is applied rgb_thresh -- RGB thresh tuple below which only obstacle pixels are detected Return value: nav_img -- binary image identifying rocks/obstacles terrain pixels """ # Create an array of zeros same xy size as input_img, but single channel obs_img = np.zeros_like(input_img[:, :, 0]) # Require that each of the R(0), G(1), B(2) pixels be below all three # rgb_thresh values such that pix_below_thresh will now contain a # boolean array with "True" where threshold was met #pix_below_thresh = ( # (input_img[:, :, 0] < rgb_thresh[0]) & # (input_img[:, :, 1] < rgb_thresh[1]) & # (input_img[:, :, 2] < rgb_thresh[2]) #) pix_below_thresh = ( (np.logical_and(input_img[:, :, 0] > 0,input_img[:, :, 0] <= rgb_thresh[0])) & (np.logical_and(input_img[:, :, 1] > 0,input_img[:, :, 1] <= rgb_thresh[1])) & (np.logical_and(input_img[:, :, 2] > 0,input_img[:, :, 2] <= rgb_thresh[2])) ) # Index the array of zeros with the boolean array and set to 1 (white) # those pixels that are below rgb_thresh for rocks/obstacles terrain obs_img[pix_below_thresh] = 1 # obs_img will now contain white pixels identifying obstacle terrain return obs_img threshed_obstacles_image = color_thresh_obst(warped) plt.imshow(threshed_obstacles_image, cmap='gray') """ Explanation: Color thresholding for obstacle terrain End of explanation """ def color_thresh_rock(input_img, low_bound, upp_bound): """ Apply a color threshold using OpenCV to extract pixels for gold rocks. 
Keyword arguments: input_img -- numpy image on which OpenCV HSV threshold is applied low_bound -- tuple defining lower HSV color value for gold rocks upp_bound -- tuple defining upper HSV color value for gold rocks Return value: threshed_img -- binary image identifying gold rock pixels """ # Convert BGR to HSV hsv_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2HSV) # Threshold the HSV image to get only colors for gold rocks threshed_img = cv2.inRange(hsv_img, low_bound, upp_bound) return threshed_img # define range of gold rock color in HSV lower_bound = (75, 130, 130) upper_bound = (255, 255, 255) # apply rock color threshold to original rocks 1 and 2 images threshed_rock_image = color_thresh_rock( rock_img, lower_bound, upper_bound ) threshed_rock2_image = color_thresh_rock( rock_img2, lower_bound, upper_bound ) # apply rock color threshold to warped rocks 1 and 2 images threshed_warped_rock_image = color_thresh_rock( warped_rock, lower_bound, upper_bound ) threshed_warped_rock2_image = color_thresh_rock( warped_rock2, lower_bound, upper_bound ) # verify correctness of gold rock threshold fig = plt.figure(figsize=(20,11)) plt.subplot(421) plt.imshow(rock_img) plt.subplot(422) plt.imshow(threshed_rock_image, cmap='gray') plt.subplot(423) plt.imshow(warped_rock) plt.subplot(424) plt.imshow(threshed_warped_rock_image, cmap='gray') plt.subplot(425) plt.imshow(rock_img2) plt.subplot(426) plt.imshow(threshed_rock2_image, cmap='gray') plt.subplot(427) plt.imshow(warped_rock2) plt.subplot(428) plt.imshow(threshed_warped_rock2_image, cmap='gray') """ Explanation: Color thresholding for gold rocks End of explanation """ def to_rover_coords(binary_img): """Convert all points on img coord-frame to those on rover's frame.""" # Identify nonzero pixels in binary image representing # region of interest e.g. 
rocks ypos, xpos = binary_img.nonzero() # Calculate pixel positions with reference to rover's coordinate # frame given that rover front cam itself is at center bottom of # the photographed image. x_pixel = -(ypos - binary_img.shape[0]).astype(np.float) y_pixel = -(xpos - binary_img.shape[1]/2).astype(np.float) return x_pixel, y_pixel def to_polar_coords(x_pix, y_pix): """Convert cartesian coordinates to polar coordinates.""" # compute distance and angle of 'each' pixel from origin and # vertical respectively distances = np.sqrt(x_pix**2 + y_pix**2) angles = np.arctan2(y_pix, x_pix) return distances, angles def rotate_pix(x_pix, y_pix, angle): """Apply a geometric rotation.""" angle_rad = angle * np.pi / 180 # yaw to radians x_pix_rotated = (x_pix * np.cos(angle_rad)) - (y_pix * np.sin(angle_rad)) y_pix_rotated = (x_pix * np.sin(angle_rad)) + (y_pix * np.cos(angle_rad)) return x_pix_rotated, y_pix_rotated def translate_pix(x_pix_rot, y_pix_rot, x_pos, y_pos, scale): """Apply a geometric translation and scaling.""" x_pix_translated = (x_pix_rot / scale) + x_pos y_pix_translated = (y_pix_rot / scale) + y_pos return x_pix_translated, y_pix_translated def pix_to_world(x_pix, y_pix, x_pos, y_pos, yaw, world_size, scale): """ Apply a geometric transformation i.e. rotation and translation to ROI. 
Keyword arguments: x_pix, y_pix -- numpy array coords of ROI being converted to world frame x_pos, y_pos, yaw -- rover position and yaw angle in world frame world_size -- integer length of the square world map (200 x 200 pixels) scale -- scale factor between world frame pixels and rover frame pixels Note: Requires functions rotate_pix and translate_pix to work """ # Apply rotation and translation x_pix_rot, y_pix_rot = rotate_pix( x_pix, y_pix, yaw ) x_pix_tran, y_pix_tran = translate_pix( x_pix_rot, y_pix_rot, x_pos, y_pos, scale ) # Clip pixels to be within world_size x_pix_world = np.clip(np.int_(x_pix_tran), 0, world_size - 1) y_pix_world = np.clip(np.int_(y_pix_tran), 0, world_size - 1) return x_pix_world, y_pix_world # Grab another random image idx = np.random.randint(0, len(img_list)-1) image = mpimg.imread(img_list[idx]) warped = perspect_transform(image, source, destination) threshed = color_thresh_nav(warped) # Calculate pixel values in rover-centric coords and # distance/angle to all pixels xpix, ypix = to_rover_coords(threshed) dist, angles = to_polar_coords(xpix, ypix) mean_dir = np.mean(angles) ######## TESTING ############ xpix = xpix[dist < 130] ypix = ypix[dist < 130] # Do some plotting fig = plt.figure(figsize=(12,9)) plt.subplot(221) plt.imshow(image) plt.subplot(222) plt.imshow(warped) plt.subplot(223) plt.imshow(threshed, cmap='gray') plt.subplot(224) plt.plot(xpix, ypix, '.') plt.ylim(-160, 160) plt.xlim(0, 160) arrow_length = 100 x_arrow = arrow_length * np.cos(mean_dir) y_arrow = arrow_length * np.sin(mean_dir) plt.arrow(0, 0, x_arrow, y_arrow, color='red', zorder=2, head_width=10, width=2) """ Explanation: Coordinate Transformations Define the functions used to do coordinate transforms and apply them to an image. 
End of explanation """ x_nav_test_pix, y_nav_test_pix = to_rover_coords(threshed) nav_test_dists, nav_test_angles = to_polar_coords(x_nav_test_pix, y_nav_test_pix) mean_test_angle = np.mean(nav_test_angles) # separate nav_test_angles into left and right angles nav_test_left_angles = nav_test_angles[nav_test_angles > 0] mean_test_left_angle = np.mean(nav_test_left_angles) nav_test_right_angles = nav_test_angles[nav_test_angles < 0] mean_test_right_angle = np.mean(nav_test_right_angles) print('nav_test_angles:') print(nav_test_angles) print('amount: ', len(nav_test_angles)) print('mean:', mean_test_angle * 180 / np.pi) print('') print('nav_test_left_angles:') print(nav_test_left_angles) print('amount: ', len(nav_test_left_angles)) print('mean:', mean_test_left_angle * 180 / np.pi) print('') print('nav_test_right_angles:') print(nav_test_right_angles) print('amount: ', len(nav_test_right_angles)) print('mean:', mean_test_right_angle * 180 / np.pi) print('') #### do some plotting ###### fig = plt.figure(figsize=(12,9)) plt.plot(x_nav_test_pix, y_nav_test_pix, '.') plt.ylim(-160, 160) plt.xlim(0, 180) arrow_length = 150 # main test angle x_mean_test_angle = arrow_length * np.cos(mean_test_angle) y_mean_test_angle = arrow_length * np.sin(mean_test_angle) plt.arrow(0, 0, x_mean_test_angle, y_mean_test_angle, color='red', zorder=2, head_width=10, width=2) # main left test angle x_mean_test_left_angle = arrow_length * np.cos(mean_test_left_angle) y_mean_test_left_angle = arrow_length * np.sin(mean_test_left_angle) plt.arrow(0, 0, x_mean_test_left_angle, y_mean_test_left_angle, color='yellow', zorder=2, head_width=10, width=2) # main right test angle x_mean_test_right_angle = arrow_length * np.cos(mean_test_right_angle) y_mean_test_right_angle = arrow_length * np.sin(mean_test_right_angle) plt.arrow(0, 0, x_mean_test_right_angle, y_mean_test_right_angle, color='blue', zorder=2, head_width=10, width=2) """ Explanation: Testing left and right nav angles End of explanation """ 
nav_x_pixs, nav_y_pixs = to_rover_coords(threshed) nav_dists, nav_angles = to_polar_coords(nav_x_pixs, nav_y_pixs) print('nav_x_pixs:') print(nav_x_pixs) print(nav_x_pixs.shape) print('') print('nav_y_pixs:') print(nav_y_pixs) print(nav_y_pixs.shape) print('') print('nav_dists:') print('len(nav_dists):', len(nav_dists)) print(nav_dists[:4]) print('mean:', np.mean(nav_dists)) print('shape:', nav_dists.shape) print('') # remove some pixels that are farthest away #indexes_to_remove = [] #trim_nav_x_pixs = np.delete(nav_x_pixs, x ) trim_nav_x_pixs = nav_x_pixs[nav_dists < 120] print('trim_nav_x_pixs') print(trim_nav_x_pixs) trim_nav_y_pixs = nav_y_pixs[nav_dists < 120] print('trim_nav_y_pixs') print(trim_nav_y_pixs) """ Explanation: Testing Image Pixels for Improving Fidelity End of explanation """ import pandas as pd # Change the path below to your data directory # If you are in a locale (e.g., Europe) that uses ',' as the decimal separator # change the '.' to ',' # Read in csv log file as dataframe df = pd.read_csv('../test_dataset_2/robot_log.csv', delimiter=';', decimal='.') csv_img_list = df["Path"].tolist() # Create list of image pathnames # Read in ground truth map and create a 3-channel image with it ground_truth = mpimg.imread('../calibration_images/map_bw.png') ground_truth_3d = np.dstack( (ground_truth*0, ground_truth*255, ground_truth*0) ).astype(np.float) class SensorData(): """ Create a class to be a container of rover sensor data from sim. Reads in saved data from csv sensor log file generated by sim which includes saved locations of front camera snapshots and corresponding rover position and yaw values in world coordinate frame """ def __init__(self): """ Initialize a SensorData instance unique to a single simulation run. 
worldmap instance variable is instantiated with a size of 200 square grids corresponding to a 200 square meters space which is same size as the 200 square pixels ground_truth variable allowing full range of output position values in x and y from the sim """ self.images = csv_img_list self.xpos = df["X_Position"].values self.ypos = df["Y_Position"].values self.yaw = df["Yaw"].values # running index set to -1 as hack because moviepy # (below) seems to run one extra iteration self.count = -1 self.worldmap = np.zeros((200, 200, 3)).astype(np.float) self.ground_truth = ground_truth_3d # Ground truth worldmap # Instantiate a SensorData().. this will be a global variable/object # that can be referenced in the process_image() function below data = SensorData() """ Explanation: Read in saved data and ground truth map of the world The next cell is all setup to read data saved from rover sensors into a pandas dataframe. Here we'll also read in a "ground truth" map of the world, where white pixels (pixel value = 1) represent navigable terrain. After that, we'll define a class to store telemetry data and pathnames to images. When the class (data = SensorData()) is instantiated, we'll have a global variable called data that can be referenced for telemetry and to map data within the process_image() function in the following cell. End of explanation """ def process_image(input_img): """ Establish ROIs in rover cam image and overlay with ground truth map. 
Keyword argument: input_img -- 3 channel color image Return value: output_img -- 3 channel color image with ROIs identified Notes: Requires data (a global SensorData object) Required by the ImageSequeceClip object from moviepy module """ # Example of how to use the SensorData() object defined above # to print the current x, y and yaw values # print(data.xpos[data.count], data.ypos[data.count], data.yaw[data.count]) # 1) Define source and destination points for perspective transform # 2) Apply perspective transform warped_img = perspect_transform(input_img, source, destination) # 3) Apply color threshold to identify following ROIs: # a. navigable terrain # b. obstacles # c. rock samples threshed_img_navigable = color_thresh_nav(warped_img) threshed_img_obstacle = color_thresh_obst(warped_img) threshed_img_rock = color_thresh_rock( warped_img, lower_bound, upper_bound ) # 4) Convert thresholded image pixel values to rover-centric coords navigable_x_rover, navigable_y_rover = to_rover_coords(threshed_img_navigable) obstacle_x_rover, obstacle_y_rover = to_rover_coords(threshed_img_obstacle) rock_x_rover, rock_y_rover = to_rover_coords(threshed_img_rock) ########################### TESTING ############################ nav_dists = to_polar_coords(navigable_x_rover, navigable_y_rover)[0] navigable_x_rover = navigable_x_rover[nav_dists < 130] navigable_y_rover = navigable_y_rover[nav_dists < 130] # 5) Convert rover-centric pixel values to world coords my_worldmap = np.zeros((200, 200)) my_scale = 10 # scale factor assumed between world and rover space pixels #curr_rover_xpos = data.xpos[data.count-1] #curr_rover_ypos = data.ypos[data.count-1] #curr_rover_yaw = data.yaw[data.count-1] navigable_x_world, navigable_y_world = pix_to_world( navigable_x_rover, navigable_y_rover, data.xpos[data.count], data.ypos[data.count], data.yaw[data.count], #curr_rover_xpos, #curr_rover_ypos, #curr_rover_yaw, my_worldmap.shape[0], my_scale ) obstacle_x_world, obstacle_y_world = pix_to_world( 
obstacle_x_rover, obstacle_y_rover, data.xpos[data.count], data.ypos[data.count], data.yaw[data.count], #curr_rover_xpos, #curr_rover_ypos, #curr_rover_yaw, my_worldmap.shape[0], my_scale ) rock_x_world, rock_y_world = pix_to_world( rock_x_rover, rock_y_rover, data.xpos[data.count], data.ypos[data.count], data.yaw[data.count], #curr_rover_xpos, #curr_rover_ypos, #curr_rover_yaw, my_worldmap.shape[0], my_scale ) # 6) Update worldmap (to be displayed on right side of screen) #data.worldmap[obstacle_y_world, obstacle_x_world] = (255,0,0) #data.worldmap[rock_y_world, rock_x_world] = (255,255,255) #data.worldmap[navigable_y_world, navigable_x_world] = (0,0,255) data.worldmap[obstacle_y_world, obstacle_x_world, 0] += 1 data.worldmap[rock_y_world, rock_x_world, 1] += 1 data.worldmap[navigable_y_world, navigable_x_world, 2] += 1 # 7) Make a mosaic image # First create a blank image (can be whatever shape) output_image = np.zeros( (input_img.shape[0] + data.worldmap.shape[0], input_img.shape[1]*2, 3) ) # Next we populate regions of the image with various output # Here we're putting the original image in the upper left hand corner output_image[0:input_img.shape[0], 0:input_img.shape[1]] = input_img # add a warped image to the mosaic warped = perspect_transform(input_img, source, destination) # Add the warped image in the upper right hand corner output_image[0:input_img.shape[0], input_img.shape[1]:] = warped # Overlay worldmap with ground truth map map_add = cv2.addWeighted(data.worldmap, 1, data.ground_truth, 0.5, 0) # Flip map overlay so y-axis points upward and add to output_image output_image[ input_img.shape[0]:, 0:data.worldmap.shape[1] ] = np.flipud(map_add) # Then putting some text over the image cv2.putText( output_image, "Populate this image with your analyses to make a video!", (20, 20), cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1 ) data.count += 1 # Keep track of the index in the Databucket() return output_image """ Explanation: Write a function to process 
stored images The process_image() function below is modified by adding in the perception step processes (functions defined above) to perform image analysis and mapping. The following cell is all set up to use this process_image() function in conjunction with the moviepy video processing package to create a video from the rover camera image data taken in the simulator. In short, we will be passing individual images into process_image() and building up an image called output_image that will be stored as one frame of the output video. A mosaic of the various steps of above analysis process and additional text can also be added. The output video ultimately demonstrates our mapping process. End of explanation """ # Import everything needed to edit/save/watch video clips from moviepy.editor import VideoFileClip from moviepy.editor import ImageSequenceClip # Define pathname to save the output video output = '../output/test_mapping.mp4' # Re-initialize data in case this cell is run multiple times data = SensorData() # Note: output video will be sped up because recording rate in # simulator is fps=25 clip = ImageSequenceClip(data.images, fps=60) new_clip = clip.fl_image(process_image) # process_image expects color images! %time new_clip.write_videofile(output, audio=False) """ Explanation: Make a video from processed image data Use the moviepy library to process images and create a video. End of explanation """ from IPython.display import HTML HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(output)) """ Explanation: This next cell should function as an inline video player If this fails to render the video, the alternative video rendering method in the following cell can be run. The output video mp4 is saved in the /output folder. 
End of explanation """ import io import base64 video = io.open(output, 'r+b').read() encoded_video = base64.b64encode(video) HTML(data='''<video alt="test" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4" /> </video>'''.format(encoded_video.decode('ascii'))) """ Explanation: Below is an alternative way to create a video in case the above cell did not work. End of explanation """
google/CFU-Playground
third_party/tflite-micro/tensorflow/lite/micro/examples/hello_world/train/train_hello_world_model.ipynb
apache-2.0
# Define paths to model files import os MODELS_DIR = 'models/' if not os.path.exists(MODELS_DIR): os.mkdir(MODELS_DIR) MODEL_TF = MODELS_DIR + 'model' MODEL_NO_QUANT_TFLITE = MODELS_DIR + 'model_no_quant.tflite' MODEL_TFLITE = MODELS_DIR + 'model.tflite' MODEL_TFLITE_MICRO = MODELS_DIR + 'model.cc' """ Explanation: Train a Simple TensorFlow Lite for Microcontrollers model This notebook demonstrates the process of training a 2.5 kB model using TensorFlow and converting it for use with TensorFlow Lite for Microcontrollers. Deep learning networks learn to model patterns in underlying data. Here, we're going to train a network to model data generated by a sine function. This will result in a model that can take a value, x, and predict its sine, y. The model created in this notebook is used in the hello_world example for TensorFlow Lite for MicroControllers. <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tflite-micro/blob/main/tensorflow/lite/micro/examples/hello_world/train/train_hello_world_model.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/tflite-micro/blob/main/tensorflow/lite/micro/examples/hello_world/train/train_hello_world_model.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> Configure Defaults End of explanation """ ! 
pip install tensorflow==2.4.0 """ Explanation: Setup Environment Install Dependencies End of explanation """ # TensorFlow is an open source machine learning library import tensorflow as tf # Keras is TensorFlow's high-level API for deep learning from tensorflow import keras # Numpy is a math library import numpy as np # Pandas is a data manipulation library import pandas as pd # Matplotlib is a graphing library import matplotlib.pyplot as plt # Math is Python's math library import math # Set seed for experiment reproducibility seed = 1 np.random.seed(seed) tf.random.set_seed(seed) """ Explanation: Import Dependencies End of explanation """ # Number of sample datapoints SAMPLES = 1000 # Generate a uniformly distributed set of random numbers in the range from # 0 to 2π, which covers a complete sine wave oscillation x_values = np.random.uniform( low=0, high=2*math.pi, size=SAMPLES).astype(np.float32) # Shuffle the values to guarantee they're not in order np.random.shuffle(x_values) # Calculate the corresponding sine values y_values = np.sin(x_values).astype(np.float32) # Plot our data. The 'b.' argument tells the library to print blue dots. plt.plot(x_values, y_values, 'b.') plt.show() """ Explanation: Dataset 1. Generate Data The code in the following cell will generate a set of random x values, calculate their sine values, and display them on a graph. End of explanation """ # Add a small random number to each y value y_values += 0.1 * np.random.randn(*y_values.shape) # Plot our data plt.plot(x_values, y_values, 'b.') plt.show() """ Explanation: 2. Add Noise Since it was generated directly by the sine function, our data fits a nice, smooth curve. However, machine learning models are good at extracting underlying meaning from messy, real world data. To demonstrate this, we can add some noise to our data to approximate something more life-like. 
In the following cell, we'll add some random noise to each value, then draw a new graph: End of explanation """ # We'll use 60% of our data for training and 20% for testing. The remaining 20% # will be used for validation. Calculate the indices of each section. TRAIN_SPLIT = int(0.6 * SAMPLES) TEST_SPLIT = int(0.2 * SAMPLES + TRAIN_SPLIT) # Use np.split to chop our data into three parts. # The second argument to np.split is an array of indices where the data will be # split. We provide two indices, so the data will be divided into three chunks. x_train, x_test, x_validate = np.split(x_values, [TRAIN_SPLIT, TEST_SPLIT]) y_train, y_test, y_validate = np.split(y_values, [TRAIN_SPLIT, TEST_SPLIT]) # Double check that our splits add up correctly assert (x_train.size + x_validate.size + x_test.size) == SAMPLES # Plot the data in each partition in different colors: plt.plot(x_train, y_train, 'b.', label="Train") plt.plot(x_test, y_test, 'r.', label="Test") plt.plot(x_validate, y_validate, 'y.', label="Validate") plt.legend() plt.show() """ Explanation: 3. Split the Data We now have a noisy dataset that approximates real world data. We'll be using this to train our model. To evaluate the accuracy of the model we train, we'll need to compare its predictions to real data and check how well they match up. This evaluation happens during training (where it is referred to as validation) and after training (referred to as testing) It's important in both cases that we use fresh data that was not already used to train the model. The data is split as follows: 1. Training: 60% 2. Validation: 20% 3. Testing: 20% The following code will split our data and then plots each set as a different color: End of explanation """ # We'll use Keras to create a simple model architecture model_1 = tf.keras.Sequential() # First layer takes a scalar input and feeds it through 8 "neurons". The # neurons decide whether to activate based on the 'relu' activation function. 
model_1.add(keras.layers.Dense(8, activation='relu', input_shape=(1,))) # Final layer is a single neuron, since we want to output a single value model_1.add(keras.layers.Dense(1)) # Compile the model using the standard 'adam' optimizer and the mean squared error or 'mse' loss function for regression. model_1.compile(optimizer='adam', loss='mse', metrics=['mae']) """ Explanation: Training 1. Design the Model We're going to build a simple neural network model that will take an input value (in this case, x) and use it to predict a numeric output value (the sine of x). This type of problem is called a regression. It will use layers of neurons to attempt to learn any patterns underlying the training data, so it can make predictions. To begin with, we'll define two layers. The first layer takes a single input (our x value) and runs it through 8 neurons. Based on this input, each neuron will become activated to a certain degree based on its internal state (its weight and bias values). A neuron's degree of activation is expressed as a number. The activation numbers from our first layer will be fed as inputs to our second layer, which is a single neuron. It will apply its own weights and bias to these inputs and calculate its own activation, which will be output as our y value. Note: To learn more about how neural networks function, you can explore the Learn TensorFlow codelabs. The code in the following cell defines our model using Keras, TensorFlow's high-level API for creating deep learning networks. Once the network is defined, we compile it, specifying parameters that determine how it will be trained: End of explanation """ # Train the model on our training data while validating on our validation set history_1 = model_1.fit(x_train, y_train, epochs=500, batch_size=64, validation_data=(x_validate, y_validate)) """ Explanation: 2. Train the Model Once we've defined the model, we can use our data to train it. 
Training involves passing an x value into the neural network, checking how far the network's output deviates from the expected y value, and adjusting the neurons' weights and biases so that the output is more likely to be correct the next time. Training runs this process on the full dataset multiple times, and each full run-through is known as an epoch. The number of epochs to run during training is a parameter we can set. During each epoch, data is run through the network in multiple batches. Each batch, several pieces of data are passed into the network, producing output values. These outputs' correctness is measured in aggregate and the network's weights and biases are adjusted accordingly, once per batch. The batch size is also a parameter we can set. The code in the following cell uses the x and y values from our training data to train the model. It runs for 500 epochs, with 64 pieces of data in each batch. We also pass in some data for validation. As you will see when you run the cell, training can take a while to complete: End of explanation """ # Draw a graph of the loss, which is the distance between # the predicted and actual values during training and validation. train_loss = history_1.history['loss'] val_loss = history_1.history['val_loss'] epochs = range(1, len(train_loss) + 1) plt.plot(epochs, train_loss, 'g.', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() """ Explanation: 3. Plot Metrics 1. Loss (or Mean Squared Error) During training, the model's performance is constantly being measured against both our training data and the validation data that we set aside earlier. Training produces a log of data that tells us how the model's performance changed over the course of the training process. 
The following cells will display some of that data in a graphical form: End of explanation """ # Exclude the first few epochs so the graph is easier to read SKIP = 50 plt.plot(epochs[SKIP:], train_loss[SKIP:], 'g.', label='Training loss') plt.plot(epochs[SKIP:], val_loss[SKIP:], 'b.', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() """ Explanation: The graph shows the loss (or the difference between the model's predictions and the actual data) for each epoch. There are several ways to calculate loss, and the method we have used is mean squared error. There is a distinct loss value given for the training and the validation data. As we can see, the amount of loss rapidly decreases over the first 25 epochs, before flattening out. This means that the model is improving and producing more accurate predictions! Our goal is to stop training when either the model is no longer improving, or when the training loss is less than the validation loss, which would mean that the model has learned to predict the training data so well that it can no longer generalize to new data. To make the flatter part of the graph more readable, let's skip the first 50 epochs: End of explanation """ plt.clf() # Draw a graph of mean absolute error, which is another way of # measuring the amount of error in the prediction. train_mae = history_1.history['mae'] val_mae = history_1.history['val_mae'] plt.plot(epochs[SKIP:], train_mae[SKIP:], 'g.', label='Training MAE') plt.plot(epochs[SKIP:], val_mae[SKIP:], 'b.', label='Validation MAE') plt.title('Training and validation mean absolute error') plt.xlabel('Epochs') plt.ylabel('MAE') plt.legend() plt.show() """ Explanation: From the plot, we can see that loss continues to reduce until around 200 epochs, at which point it is mostly stable. This means that there's no need to train our network beyond 200 epochs. 
However, we can also see that the lowest loss value is still around 0.155. This means that our network's predictions are off by an average of ~15%. In addition, the validation loss values jump around a lot, and is sometimes even higher. 2. Mean Absolute Error To gain more insight into our model's performance we can plot some more data. This time, we'll plot the mean absolute error, which is another way of measuring how far the network's predictions are from the actual numbers: End of explanation """ # Calculate and print the loss on our test dataset test_loss, test_mae = model_1.evaluate(x_test, y_test) # Make predictions based on our test dataset y_test_pred = model_1.predict(x_test) # Graph the predictions against the actual values plt.clf() plt.title('Comparison of predictions and actual values') plt.plot(x_test, y_test, 'b.', label='Actual values') plt.plot(x_test, y_test_pred, 'r.', label='TF predictions') plt.legend() plt.show() """ Explanation: This graph of mean absolute error tells another story. We can see that training data shows consistently lower error than validation data, which means that the network may have overfit, or learned the training data so rigidly that it can't make effective predictions about new data. In addition, the mean absolute error values are quite high, ~0.305 at best, which means some of the model's predictions are at least 30% off. A 30% error means we are very far from accurately modelling the sine wave function. 3. Actual vs Predicted Outputs To get more insight into what is happening, let's check its predictions against the test dataset we set aside earlier: End of explanation """ model = tf.keras.Sequential() # First layer takes a scalar input and feeds it through 16 "neurons". The # neurons decide whether to activate based on the 'relu' activation function. 
model.add(keras.layers.Dense(16, activation='relu', input_shape=(1,))) # The new second and third layer will help the network learn more complex representations model.add(keras.layers.Dense(16, activation='relu')) # Final layer is a single neuron, since we want to output a single value model.add(keras.layers.Dense(1)) # Compile the model using the standard 'adam' optimizer and the mean squared error or 'mse' loss function for regression. model.compile(optimizer='adam', loss="mse", metrics=["mae"]) """ Explanation: Oh dear! The graph makes it clear that our network has learned to approximate the sine function in a very limited way. The rigidity of this fit suggests that the model does not have enough capacity to learn the full complexity of the sine wave function, so it's only able to approximate it in an overly simplistic way. By making our model bigger, we should be able to improve its performance. Training a Larger Model 1. Design the Model To make our model bigger, let's add an additional layer of neurons. The following cell redefines our model in the same way as earlier, but with 16 neurons in the first layer and an additional layer of 16 neurons in the middle: End of explanation """ # Train the model history = model.fit(x_train, y_train, epochs=500, batch_size=64, validation_data=(x_validate, y_validate)) # Save the model to disk model.save(MODEL_TF) """ Explanation: 2. Train the Model We'll now train and save the new model. End of explanation """ # Draw a graph of the loss, which is the distance between # the predicted and actual values during training and validation. 
train_loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(train_loss) + 1) # Exclude the first few epochs so the graph is easier to read SKIP = 100 plt.figure(figsize=(10, 4)) plt.subplot(1, 2, 1) plt.plot(epochs[SKIP:], train_loss[SKIP:], 'g.', label='Training loss') plt.plot(epochs[SKIP:], val_loss[SKIP:], 'b.', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.subplot(1, 2, 2) # Draw a graph of mean absolute error, which is another way of # measuring the amount of error in the prediction. train_mae = history.history['mae'] val_mae = history.history['val_mae'] plt.plot(epochs[SKIP:], train_mae[SKIP:], 'g.', label='Training MAE') plt.plot(epochs[SKIP:], val_mae[SKIP:], 'b.', label='Validation MAE') plt.title('Training and validation mean absolute error') plt.xlabel('Epochs') plt.ylabel('MAE') plt.legend() plt.tight_layout() """ Explanation: 3. Plot Metrics Each training epoch, the model prints out its loss and mean absolute error for training and validation. You can read this in the output above (note that your exact numbers may differ): Epoch 500/500 10/10 [==============================] - 0s 10ms/step - loss: 0.0121 - mae: 0.0882 - val_loss: 0.0115 - val_mae: 0.0865 You can see that we've already got a huge improvement - validation loss has dropped from 0.15 to 0.01, and validation MAE has dropped from 0.33 to 0.08. 
The following cell will print the same graphs we used to evaluate our original model, but showing our new training history: End of explanation """ # Calculate and print the loss on our test dataset test_loss, test_mae = model.evaluate(x_test, y_test) # Make predictions based on our test dataset y_test_pred = model.predict(x_test) # Graph the predictions against the actual values plt.clf() plt.title('Comparison of predictions and actual values') plt.plot(x_test, y_test, 'b.', label='Actual values') plt.plot(x_test, y_test_pred, 'r.', label='TF predicted') plt.legend() plt.show() """ Explanation: Great results! From these graphs, we can see several exciting things: The overall loss and MAE are much better than our previous network Metrics are better for validation than training, which means the network is not overfitting The reason the metrics for validation are better than those for training is that validation metrics are calculated at the end of each epoch, while training metrics are calculated throughout the epoch, so validation happens on a model that has been trained slightly longer. This all means our network seems to be performing well! To confirm, let's check its predictions against the test dataset we set aside earlier: End of explanation """ # Convert the model to the TensorFlow Lite format without quantization converter = tf.lite.TFLiteConverter.from_saved_model(MODEL_TF) model_no_quant_tflite = converter.convert() # Save the model to disk open(MODEL_NO_QUANT_TFLITE, "wb").write(model_no_quant_tflite) # Convert the model to the TensorFlow Lite format with quantization def representative_dataset(): for i in range(500): yield([x_train[i].reshape(1, 1)]) # Set the optimization flag. 
converter.optimizations = [tf.lite.Optimize.DEFAULT] # Enforce integer only quantization converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] converter.inference_input_type = tf.int8 converter.inference_output_type = tf.int8 # Provide a representative dataset to ensure we quantize correctly. converter.representative_dataset = representative_dataset model_tflite = converter.convert() # Save the model to disk open(MODEL_TFLITE, "wb").write(model_tflite) """ Explanation: Much better! The evaluation metrics we printed show that the model has a low loss and MAE on the test data, and the predictions line up visually with our data fairly well. The model isn't perfect; its predictions don't form a smooth sine curve. For instance, the line is almost straight when x is between 4.2 and 5.2. If we wanted to go further, we could try further increasing the capacity of the model, perhaps using some techniques to defend from overfitting. However, an important part of machine learning is knowing when to stop. This model is good enough for our use case - which is to make some LEDs blink in a pleasing pattern. Generate a TensorFlow Lite Model 1. Generate Models with or without Quantization We now have an acceptably accurate model. We'll use the TensorFlow Lite Converter to convert the model into a special, space-efficient format for use on memory-constrained devices. Since this model is going to be deployed on a microcontroller, we want it to be as tiny as possible! One technique for reducing the size of a model is called quantization. It reduces the precision of the model's weights, and possibly the activations (output of each layer) as well, which saves memory, often without much impact on accuracy. Quantized models also run faster, since the calculations required are simpler. In the following cell, we'll convert the model twice: once with quantization, once without. 
End of explanation """ def predict_tflite(tflite_model, x_test): # Prepare the test data x_test_ = x_test.copy() x_test_ = x_test_.reshape((x_test.size, 1)) x_test_ = x_test_.astype(np.float32) # Initialize the TFLite interpreter interpreter = tf.lite.Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_details = interpreter.get_input_details()[0] output_details = interpreter.get_output_details()[0] # If required, quantize the input layer (from float to integer) input_scale, input_zero_point = input_details["quantization"] if (input_scale, input_zero_point) != (0.0, 0): x_test_ = x_test_ / input_scale + input_zero_point x_test_ = x_test_.astype(input_details["dtype"]) # Invoke the interpreter y_pred = np.empty(x_test_.size, dtype=output_details["dtype"]) for i in range(len(x_test_)): interpreter.set_tensor(input_details["index"], [x_test_[i]]) interpreter.invoke() y_pred[i] = interpreter.get_tensor(output_details["index"])[0] # If required, dequantized the output layer (from integer to float) output_scale, output_zero_point = output_details["quantization"] if (output_scale, output_zero_point) != (0.0, 0): y_pred = y_pred.astype(np.float32) y_pred = (y_pred - output_zero_point) * output_scale return y_pred def evaluate_tflite(tflite_model, x_test, y_true): global model y_pred = predict_tflite(tflite_model, x_test) loss_function = tf.keras.losses.get(model.loss) loss = loss_function(y_true, y_pred).numpy() return loss """ Explanation: 2. Compare Model Performance To prove these models are accurate even after conversion and quantization, we'll compare their predictions and loss on our test dataset. Helper functions We define the predict (for predictions) and evaluate (for loss) functions for TFLite models. Note: These are already included in a TF model, but not in a TFLite model. 
End of explanation """ # Calculate predictions y_test_pred_tf = model.predict(x_test) y_test_pred_no_quant_tflite = predict_tflite(model_no_quant_tflite, x_test) y_test_pred_tflite = predict_tflite(model_tflite, x_test) # Compare predictions plt.clf() plt.title('Comparison of various models against actual values') plt.plot(x_test, y_test, 'bo', label='Actual values') plt.plot(x_test, y_test_pred_tf, 'ro', label='TF predictions') plt.plot(x_test, y_test_pred_no_quant_tflite, 'bx', label='TFLite predictions') plt.plot(x_test, y_test_pred_tflite, 'gx', label='TFLite quantized predictions') plt.legend() plt.show() """ Explanation: 1. Predictions End of explanation """ # Calculate loss loss_tf, _ = model.evaluate(x_test, y_test, verbose=0) loss_no_quant_tflite = evaluate_tflite(model_no_quant_tflite, x_test, y_test) loss_tflite = evaluate_tflite(model_tflite, x_test, y_test) # Compare loss df = pd.DataFrame.from_records( [["TensorFlow", loss_tf], ["TensorFlow Lite", loss_no_quant_tflite], ["TensorFlow Lite Quantized", loss_tflite]], columns = ["Model", "Loss/MSE"], index="Model").round(4) df """ Explanation: 2. Loss (MSE/Mean Squared Error) End of explanation """ # Calculate size size_tf = os.path.getsize(MODEL_TF) size_no_quant_tflite = os.path.getsize(MODEL_NO_QUANT_TFLITE) size_tflite = os.path.getsize(MODEL_TFLITE) # Compare size pd.DataFrame.from_records( [["TensorFlow", f"{size_tf} bytes", ""], ["TensorFlow Lite", f"{size_no_quant_tflite} bytes ", f"(reduced by {size_tf - size_no_quant_tflite} bytes)"], ["TensorFlow Lite Quantized", f"{size_tflite} bytes", f"(reduced by {size_no_quant_tflite - size_tflite} bytes)"]], columns = ["Model", "Size", ""], index="Model") """ Explanation: 3. 
Size End of explanation """
# Install xxd if it is not available
!apt-get update && apt-get -qq install xxd
# Convert to a C source file, i.e., a TensorFlow Lite for Microcontrollers model
!xxd -i {MODEL_TFLITE} > {MODEL_TFLITE_MICRO}
# Update variable names
REPLACE_TEXT = MODEL_TFLITE.replace('/', '_').replace('.', '_')
!sed -i 's/'{REPLACE_TEXT}'/g_model/g' {MODEL_TFLITE_MICRO}
""" Explanation: Summary We can see from the predictions (graph) and loss (table) that the original TF model, the TFLite model, and the quantized TFLite model are all close enough to be indistinguishable - even though they differ in size (table). This implies that the quantized (smallest) model is ready to use! Note: The quantized (integer) TFLite model is just 300 bytes smaller than the original (float) TFLite model - a tiny reduction in size! This is because the model is already so small that quantization has little effect. Complex models with more weights can have up to a 4x reduction in size! Generate a TensorFlow Lite for Microcontrollers Model Convert the TensorFlow Lite quantized model into a C source file that can be loaded by TensorFlow Lite for Microcontrollers. End of explanation """
# Print the C source file
!cat {MODEL_TFLITE_MICRO}
""" Explanation: Deploy to a Microcontroller Follow the instructions in the hello_world README.md for TensorFlow Lite for MicroControllers to deploy this model on a specific microcontroller. Reference Model: If you have not modified this notebook, you can follow the instructions as is, to deploy the model. Refer to the hello_world/train/models directory to access the models generated in this notebook. New Model: If you have generated a new model, then update the values assigned to the variables defined in hello_world/model.cc with values displayed after running the following cell. End of explanation """
jasonjensen/Montreal-Python-Web
2.Python_Quickstart.ipynb
apache-2.0
# Assign value 1 to variable x x = 1 """ Explanation: Python Quickstart Workshop on Web Scraping and Text Processing with Python by Radhika Saksena, Princeton University, saksena@princeton.edu, radhika.saksena@gmail.com Disclaimer: The code examples presented in this workshop are for educational purposes only. Please seek advice from a legal expert about the legal implications of using this code for web scraping. 1. First things first This notebook describes some Python basics which we will be using throughout the workshop. Please go through this material and try out the code examples using IPython Notebook which comes with Anaconda (https://store.continuum.io/cshop/anaconda/). 1.1 Executing code in IPython Notebook Click within an existing "Code" Cell or write new code in a "Code" Cell. Type shift-Enter to execute the Python code contained in the Cell. 1.2 Python Indentation Indentation is significant in Python. Instead of curly braces to demarcate a code block (as in C++, Java, R, etc.), consecutive statements with the same level of indentation are identified as being in the same block. Any number of spaces is valid indentation. Four spaces for each level of indentation is conventional among programmers. In IPython Notebook, simply use the [tab] key for each new level of indentation. This gets converted to four spaces automatically. 1.3 Comments in Python Single-line comments start with a # symbol and end with the end of the line Comments can be placed on a line by themselves End of explanation """ x = 1 # Assign value 1 to variable x """ Explanation: Comments can also be placed on the same line as the code as shown here. End of explanation """ """This is a multi-line comment. Assign value 1 to variable x.""" x = 1 """ Explanation: For multi-line comments, use triple-quoted strings. End of explanation """ print(1) # Print a constant x = 2014 print(x) # Print an integer variable xstr = "Hello World." 
# Print a string print(xstr) print(x,xstr) # Print multiple objects print("String 1" + " " + "String2") # Concatenate multiple strings and print them """ Explanation: 1.4 Python's print() function The print function is used to print variables and expressions to screen. print() offers a lot of functionality which we'll encounter during the workshop. For now, note that:<br/> You can pass anything to the print() function and it will attempt to print its arguments. End of explanation """ x = 1 print("Formatted integer is {0:06d}".format(x)) # Note the format specification, 06d, for the integer. y = 12.66666666667 print("Formatted floating point number is {0:2.3f}".format(y)) # Note the format specification, 2.3f, for the floating point number. iStr = "Hello World" fStr = "Goodbye World" print("Initial string: {0:s} . Final string: {1:s}.".format(iStr,fStr)) # Note the format specification, s, for the string. print("Initial string: {0} . Final string: {1}.".format(iStr,fStr)) # In this case, omitting the s format specified works too. x = 1 print("Formatted integer is {0:06d}".format(x)) y = 12.66666666667 print("Formatted floating point number is {0:2.3f}".format(y)) """ Explanation: For web-scraping and text-processing type tasks, we'd like better control over how things get printed out, such as the number of decimal places when printing out floating point numbers. Use the format() method on the string to be printed out to control the output format. End of explanation """ year = 2014 print(year) print("The year is %d." % year) print(type(year)) help(year) help(int) """ Explanation: 2. Numeric Variable Types 2.1 Integers End of explanation """ mean = (1.0 + 0.7 + 2.1)/3.0 print(mean) print("The mean is %6.2f." % mean) print(type(mean)) help(mean) help(float) """ Explanation: 2.2. 
Floating Point Numbers End of explanation """ x = 2**3 # ** is the exponentiation operator print(x) x = 9 % 4 # % is the modulus operator print(x) x = 9 // 4 # // is the operator for floor division print(x) """ Explanation: 3. Basic Operators 3.1 Arithmetic Operators Standard arithmetic operators for addition (+), subtraction (-), multiplication (*) and division (/) are supported in Python. We have already seen use of the addition (+) and division (/) operators. Some more operators that are commonly encountered are demonstrated below. End of explanation """ x = 2.0 y = 5.0 y += x # y = y + x print(y) y %= x # y = y%x print(y) """ Explanation: 3.2. Assignment Operators In addition to using the = (simple assignment operator) for assigning values to variables, one can use a composite assignment operator(+=, -=, etc.) that combines the simple assignment operator with all of these arithmetic expressions. For example: End of explanation """ x = 1 y = 1 x == y # Check for equality x = 1 y = 1 x != 1 # Check for inequality x = 0.5 y = 1.0 x > y # Check if x greater than y x < y # Check if x less than y x >= y # Check if x greater than equal to y x <= y # Check if x less than equal to y """ Explanation: 3.3. Comparison Operators End of explanation """ a = 99 b = 99 (a == b) and (a <= 100) # use the and operator to check if both the operands are true a = True b = False a and b a = True b = False a or b # use the or operator to check if at least one of the two operands is true a = 100 b = 100 a == b not(a == b) # use the not operator to reverse a logical statement """ Explanation: 3.4. Logical Operators Logical operators such as <tt>and</tt>, <tt>or</tt>, <tt>not</tt> allow specification of composite conditions, for example in <tt>if</tt> statements as we will see shortly. End of explanation """ pythonStr = 'A first Python string.' # String specified with single quotes. 
print(type(pythonStr)) print(pythonStr) pythonStr = "A first Python string" # String specified with double quotes. print(type(pythonStr)) print(pythonStr) pythonStr = """A multi-line string. A first Python string.""" # Multi-line string specified with triple quotes. print(type(pythonStr)) print(pythonStr) """ Explanation: 4. Strings A string is a sequence of characters. Strings are specified by using single quotes (' ') or double quotes (" "). Multi-line strings can be specified with triple quotes. End of explanation """ str1 = " Rock " str2 = " Paper " str3 = " Scissors " longStr = str1 + str2 + str3 print(longStr) """ Explanation: Strings can be concatenated using the addition(+) operator. End of explanation """ str1 = "Rock,Paper,Scissors\n" repeatStr = str1*5 print(repeatStr) """ Explanation: Strings can also be repeated with the multiplication (*) operator. End of explanation """ str1 = "Python" lenStr1 = len(str1) print("The length of str: is " + str(lenStr1) + ".") """ Explanation: The len() function returns the length of a string. End of explanation """ str1 = "Python" print(str1[0]) # Print the first character element of the string. print(str1[len(str1)-1]) # Print the last character element of the string. print(str1[2:4]) # Print a 2-element slice of the string, starting from the 2-nd element up to but not including the 4-th element. """ Explanation: Since, the Python string is a sequence of characters, individual characters in the string can be indexed. Note that, unlike R, in Python sequences indexing starts at 0 and goes up to one less than the length of the sequence. End of explanation """ str1 = "Python" str1[1] = "3" # Error, strings can't be modified. """ Explanation: Strings are immutable. That is, an existing instance of a string cannot be modified. Instead, a new string that contains the modification should be created. End of explanation """ str1 = "Python" print(str1.upper()) # Convert str1 to all uppercase. 
str2 = "PYTHON" print(str1.lower()) # Convert str2 to all lowercase. str3 = "Rock,Paper,Scissors,Lizard,Spock" print(str3.split(",")) # Split str3 using "," as the separator. A list of string elements is returned. str4 = "The original string has trailing spaces.\t\n" print("***"+str4.strip()+"***") # Print stripped string with trailing space characters removed. """ Explanation: Strings come with some powerful methods (https://docs.python.org/2/library/stdtypes.html#string-methods). Some of the string methods that we will often use in web scraping are shown bellow. End of explanation """ # pyList contains an integer, string and floating point number pyList = [2014,"02 June", 74.5] # Print all the elements of pyList print(pyList) print("\n") # Print the length of pyList obtained using the len() function print("Length of pyList is: {0}.\n".format(len(pyList))) """ Explanation: 5. Python Data Structures 5.1 Lists List is an indexed collection of items. Each of the list items can be of arbitrary type. Note the square brackets in the pyList list declaration below. The len() function returns the length of the list. End of explanation """ print(pyList) print("\n") # Print the first element of pyList. Remember, indexing starts with 0. print("First element of pyList: {0}.\n".format(pyList[0])) # Print the last element of pyList. Last element can be conveniently indexed using -1. print("Last element of pyList: {0}.\n".format(pyList[-1])) # Also the last element has index = (length of list - 1) check = (pyList[2] == pyList[-1]) print("Is pyList[2] equal to pyList[-1]?\n{0}.\n".format(check)) # Assign a new value to the third element of the list pyList[2] = -99.0 print("Modified element of pyList[2]: {0}.\n".format(pyList[2])) """ Explanation: List elements can be individually referenced using their index in the list. Python indexing starts with 0 and runs up to the length of the sequence - 1. The square bracket is used to specify the index in to the list. 
This notation can also be used to assign values to the elements of the list. In contrast to strings, lists are mutable. End of explanation """ pyList = ["rock","paper","scissors","lizard","Spock"] print(pyList[2:4]) # Print elements of a starting from the second, up to but not including the fourth. print(pyList[:2]) # Print the first two elements of pyList. print(pyList[2:]) # Print all the elements of pyList starting from the second. print(pyList[:]) # Print all the elements of pyList """ Explanation: Python lists can be sliced using the slice notation of two indices separated by a colon. An omitted first index indicates 0 and an omitted second index indicates the length of the list/sequence. End of explanation """ pyList = ["rock","paper","scissors","lizard","Spock"] pyList[2:4] = ["gu","pa"] # Replace the second and third elements of pyList print("Original contents of pyList:") print(pyList) print("\n") pyList[:] = [] # Clear pyList, replace all items with an empty list print("Modified contents of pyList:") print(pyList) """ Explanation: Python slice notation can also be used to assign into lists. End of explanation """ pyList = ["rock","paper"] print("Printing Python list pyList:") print(pyList) print("\n") pyList.append("scissors") print("Appended the string 'scissors' to pyList:") print(pyList) print("\n") anotherList = ["lizard","Spock"] pyList.extend(anotherList) print("Extended pyList:") print(pyList) print("\n") """ Explanation: Python lists come with useful methods to add elements - append() and extend() End of explanation """ pyList1 = ["rock","paper","scissors"] pyList2 = ["lizard","Spock"] newList = pyList1 + pyList2 print("New list:") print(newList) """ Explanation: Python lists can be concatenated using the "+" operator (similar to strings). 
End of explanation """ pyLists = [["rock","paper","scissors"], ["ji","gu","pa"]] # Print the first element (0-th index) of pyLists which is itself a list print("pyLists[0] = ") print(pyLists[0]) print("\n") # Print the 0-th index element of the first list element in pyLists print("pylists[0][0] = " + pyLists[0][0] + ".") print("\n") # Print the second element of pyLists which is itself a list print("pyLists[1] = ") print(pyLists[1]) print("\n") # Print the 0-th index element of the second list element in pyLists print("pyLists[1][0] = " + pyLists[1][0] + ".") print("\n") pyList = [1,3,4,2] pyList.sort(reverse=True) sum(pyList) 2*(pyList) #2**(pyList) """ Explanation: Python lists can be nested - list within a list within a list and so on. An index needs to be specified for each level of nesting. End of explanation """ # pyTuple contains an integer, string and floating point number pyTuple = (2014,"02 June", 74.5) # Print all the elements of pyTuple print("pyTuple is: ") print(pyTuple) print("\n") # Print the length of pyTuple obtained using the len() function print("Length of pyTuple is: {0}.\n".format(len(pyTuple))) """ Explanation: 5.2. Tuples Tuples are another sequence data type consisting of arbitrary items separated by commas. In contrast to lists, tuples are immutable, i.e., they cannot be modified. See below for a declaration of a tuple. Note the parentheses in the declaration. End of explanation """ pyTuple[1] = "31 December" # Error as pyTuple is a tuple and hence, immutable """ Explanation: Tuples are immutable. Attempting to change elements of a tuple will result in errors. End of explanation """ pyTuple = "rock", "paper", "scissors" # pack the strings into a tuple named pyTuple print(pyTuple) str0,str1,str2 = pyTuple # unpack the tuple into strings named str0, str1, str2 print("str0 = " + str0 + ".") print("str1 = " + str1 + ".") print("str2 = " + str2 + ".") """ Explanation: Tuples can be packed from and unpacked into individual elements. 
End of explanation
"""
pyTuple = "rock", "paper", "scissors" # pack the strings into a tuple named pyTuple
print(pyTuple)

str0,str1,str2 = pyTuple # unpack the tuple into strings named str0, str1, str2
print("str0 = " + str0 + ".")
print("str1 = " + str1 + ".")
print("str2 = " + str2 + ".")
"""
Explanation: One can declare tuples of tuples.
End of explanation
"""
pyTuples = (("rock","paper","scissors"),("ji","gu","pa"))

print("pyTuples[0] = {0}.".format(pyTuples[0])) # Print the first sub-tuple in pyTuples.
print("pyTuples[1] = {0}.".format(pyTuples[1])) # Print the second sub-tuple in pyTuples.
"""
Explanation: One can declare a tuple of lists.
End of explanation
"""
pyNested = (["rock","paper","scissors"],["ji","gu","pa"])

pyNested[0][2] = "lizard" # OK, list within the tuple is mutable
print(pyNested[0]) # Print first list element of the tuple
"""
Explanation: One can also declare a list of tuples.
End of explanation
"""
pyNested = [("rock","paper","scissors"),("ji","gu","pa")]

pyNested[0][2] = "lizard" # Error, tuples are immutable
"""
Explanation: 5.3. Dictionaries
A Python dictionary is an unordered set of key:value pairs that acts as an associative array. The keys are immutable and unique within one dictionary. In contrast to lists and tuples, dictionaries are indexed by keys. Note the use of curly braces in the declaration of the dictionary below. 
End of explanation """ pyDicts = {"Canada":{"Alpha-2":"CA","Alpha-3":"CAN","Numeric":"124"}, "Argentina":{"Alpha-2":"AR","Alpha-3":"ARG","Numeric":"032"}, "Austria":{"Alpha-2":"AT","Alpha-3":"AUT","Numeric":"040"}} print("pyDicts['Canada'] = {0}.".format(pyDicts['Canada'])) print("pyDicts['Canada']['Alpha-2'] = {0}.".format(pyDicts['Canada']['Alpha-2'])) """ Explanation: Parsing hierarchical data structures involving Python dictionaries will be very useful when working with the JSON data format and APIs such as the Twitter API. Values in a dictionary can be any object including other dictionaries. End of explanation """ pyNested = {"Canada":[2011,2008,2006,2004,2000 ],"Argentina":[2013,2011,2009,2007,2005],"Austria":[2013,2008,2006,2002,1999]} print("pyNested['Canada'] = {0}".format(pyNested['Canada'])) print("pyNested['Austria'][4] = {0}.".format(pyNested['Austria'][4])) """ Explanation: Values in a dictionary can also be lists. End of explanation """ pyNested = [{"year":2011,"countries":["Canada","Argentina"]}, {"year":2008,"countries":["Canada","Austria"]}, {"year":2006,"countries":["Canada","Austria"]}, {"year":2013,"countries":["Argentina","Austria"]}] print("pyNested[0] = {0}".format(pyNested[0])) print("pyNested[0]['year'] = {0}, pyNested[0]['countries'] = {1}.".format(pyNested[0]['year'],pyNested[0]['countries'])) """ Explanation: Lastly, we can have lists of dictionaries End of explanation """ pyNested = [{"year":2011,"countries":["Canada","Argentina"]}, {"year":2008,"countries":["Canada","Austria"]}, {"year":2006,"countries":["Canada","Austria"]}, {"year":2013,"countries":["Argentina","Austria"]}] # Check if first dictionary element of pyNested corresponds to years 2006 or 2008 if(pyNested[0]["year"] == 2008): print("Countries corresponding to year 2008 are: {0}.".format(pyNested[0]["countries"])) elif(pyNested[0]["year"] == 2011): print("Countries corresponding to year 2011 are: {0}.".format(pyNested[0]["countries"])) else: print("The first element does 
not correspond to either 2008 or 2011.") """ Explanation: 6. Control Flow 6.1 <tt>if</tt> Statements An if statement, coupled with zero or more elif statements can allow the execution of the script to be altered based on some condition. Here is an example. End of explanation """ countryList = ["Canada", "United States of America", "Mexico"] for country in countryList: # Loop over countryList, set country to next element in list. print(country) countryDict = {"Canada":"124","United States":"840","Mexico":"484"} print("Country\t\tISO 3166-1 Numeric Code") for country,code in countryDict.items(): # Loop over all the key and value pairs in the dictionary print("{0:12s}\t\t{1:12s}".format(country,code)) """ Explanation: Scripting languages, such as Python, make it easy to automate repetitive tasks. In this workshop, we'll use two of Python's syntactic constructs for iteration - the for loop and the while loop. 6.2 <tt>for</tt> Statements Given an iterable, such as a list, the for loop construct can iterate over each of its values as shown below. End of explanation """ countryList = ["Canada", "United States of America", "Mexico"] for i in range(0,3): # Loop over values of i in the range 0 up to, but not including, 3 print(countryList[i]) """ Explanation: 6.3 <tt>range()</tt> Function Another common use of the <tt>for</tt> loop is to iterate over an index which takes specific values. The range() function generates integers within the range specified by its arguments. End of explanation """ countryList = ["Canada", "United States of America", "Mexico"] # iterate over countryList backwards, starting from the last element while(countryList): print(countryList[-1]) countryList.pop() i = 0 countryList = ["Canada", "United States of America", "Mexico"] while(i < len(countryList)): print("Iteration variable i = {0}, Country = {1}.".format(i,countryList[i])) i += 1 """ Explanation: 6.4 while() Statement Another syntactic construct used for iteration is the while loop. 
This is generally used in conjunction with the conditional and logical operators which we saw earlier. End of explanation """ countryList = ["Canada", "United States of America", "Mexico"] for country in countryList: if(country == "United States of America"): # if the country name matches, then break out of the for loop break else: # do some processing print(country) """ Explanation: 6.5 <tt>break</tt> and <tt>continue</tt> Statements Now, if some condition is evaluated within the for/while loop and based on that, we wish to exit the loop, we can use the break statement. Note that the break statement exits the innermost loop which contains it. End of explanation """ countryList = ["Canada", "United States of America", "Mexico"] for country in countryList: if(country == "United States of America"): # if the country name matches, then break out of the for loop continue else: # do some processing print(country) """ Explanation: If, instead of exiting the loop, one merely wishes to skip that iteration, then use the continue statement as shown here. End of explanation """ filename = "tmp.txt" fout = open(filename,"w") # The 'r' option indicates that the file is being opened to be read for i in range(0,5): # Read in each line from the file # Do some processing fout.write("i = {0}.\n".format(i)) fout.close() # Once the file has been read, close the file """ Explanation: 7. Python File I/O This is a quick intro to reading and writing plain text files in Python. As we proceed through the workshop, we'll look at more sophisticated ways of reading/writing files, in non-English languages and using specialized Python modules to handle files in formats such as CSV, JSON. 7.1 Writing to a File In order to write to a file, the syntax is very similar. Open the file using the "w" mode instead of the "r" mode. Use the write() method of the file object as shown below. The syntax for the write() method is very similar to print(). 
Although, it does not automatically insert a newline at the end of the statement as does print(). End of explanation """ filename = "tmp.txt" with open(filename,"w") as fout: for i in range(0,5): fout.write("i = {0}.\n".format(i)) fout.close() """ Explanation: Alternative syntax for writing to file using 'with open' is shown below. End of explanation """ filename = "tmp.txt" fin = open(filename,"r") # The 'r' option indicates that the file is being opened to be read for line in fin: # Read in each line from the file # Do some processing print(line) fin.close() # Once the file has been read, close the file """ Explanation: 7.2 Reading from a file To open a file for reading each of its line use the open() function. Make sure that such a file does exist. Once the file has been read, close it using the close() method of the file object - this will free up system resources being used up by the open file. End of explanation """ filename = "tmp.txt" with open(filename,"r") as fin: for line in fin: # Do some processing print(line) """ Explanation: The code below demonstrates another way to open a file and read each line. With this syntax, the file is automatically closed after the <tt >with</tt> block. End of explanation """ import csv with open("game.csv","wb") as csvfile: csvwriter = csv.writer(csvfile,delimiter=',') csvwriter.writerow(["rock","paper","scissor"]) csvwriter.writerow(["ji","gu","pa"]) csvwriter.writerow(["rock","paper","scissor","lizard","Spock"]) cat game.csv """ Explanation: An input file can also be read in as one string by using the read() method. 7.3. The <tt>csv</tt> module Python's <tt>csv</tt> module provides convenient functionality for reading and writing csv files similar to that available in R. The csv files can then be imported in other statistical packages such as R and Excel. Here is a short example of using the csv module to write consecutive rows in to a comma-separated file. The delimiter can be chosen to be an arbitrary string. 
End of explanation
"""
import csv

# Write three rows to a comma-separated file.
# NOTE(review): opening in 'wb' (binary mode) is the Python 2 csv idiom; on
# Python 3 csv.writer needs a text-mode handle — open("game.csv", "w", newline='')
# — or this raises TypeError. Confirm which interpreter this notebook targets.
with open("game.csv","wb") as csvfile:
    csvwriter = csv.writer(csvfile,delimiter=',')
    csvwriter.writerow(["rock","paper","scissor"])
    csvwriter.writerow(["ji","gu","pa"])
    csvwriter.writerow(["rock","paper","scissor","lizard","Spock"])

# Shell-style command (IPython notebook cell); not valid plain Python.
cat game.csv
"""
Explanation: And this is an example of reading the games.csv file. Each row of the csv file is read in as a list.
End of explanation
"""
import csv

# Read the csv back: csv.reader yields each row as a list of strings.
with open("game.csv","r") as csvfile:
    csvreader = csv.reader(csvfile,delimiter=",")
    for row in csvreader:
        print(row)
"""
Explanation: End of explanation
"""
amcdawes/QMlabs
Lab 2 - Quantum States.ipynb
mit
import numpy as np
from qutip import *
"""
Explanation: Lab 2 - Quantum States
Useful for working examples and problems with photon quantum states. You may notice some similarity to the Jones Calculus ;-)
End of explanation
"""
# Polarization kets as 2x1 column-vector Qobj instances.
# H/V: horizontal/vertical linear basis; P45/M45: linear at +/-45 degrees;
# R/L: circular states (note the relative phase factors -1j / +1j).
H = Qobj([[1],[0]])
V = Qobj([[0],[1]])
P45 = Qobj([[1/np.sqrt(2)],[1/np.sqrt(2)]])
M45 = Qobj([[1/np.sqrt(2)],[-1/np.sqrt(2)]])
R = Qobj([[1/np.sqrt(2)],[-1j/np.sqrt(2)]])
L = Qobj([[1/np.sqrt(2)],[1j/np.sqrt(2)]])
V  # bare expression: the notebook displays the ket
"""
Explanation: These are the polarization states:
End of explanation
"""
def HWP(theta):
    """Half-wave plate: 2x2 operator (Qobj) for a plate with its axis at angle theta (radians) to the horizontal."""
    return Qobj([[np.cos(2*theta),np.sin(2*theta)],[np.sin(2*theta),-np.cos(2*theta)]]).tidyup()

def LP(theta):
    """Linear polarizer: projector onto the transmission axis at angle theta (radians)."""
    return Qobj([[np.cos(theta)**2,np.cos(theta)*np.sin(theta)],[np.sin(theta)*np.cos(theta),np.sin(theta)**2]]).tidyup()

def QWP(theta):
    """Quarter-wave plate: operator for a plate with its axis at angle theta (radians).

    .tidyup() removes negligible numerical round-off entries from the matrix.
    """
    return Qobj([[np.cos(theta)**2 + 1j*np.sin(theta)**2, (1-1j)*np.sin(theta)*np.cos(theta)],
                 [(1-1j)*np.sin(theta)*np.cos(theta), np.sin(theta)**2 + 1j*np.cos(theta)**2]]).tidyup()

QWP(np.pi/4)  # example: quarter-wave plate at 45 degrees
"""
Explanation: Devices:

HWP - Half-wave plate axis at $\theta$ to the horizontal
LP - Linear polarizer, axis at $\theta$
QWP - Quarter-wave plate, axis at $\theta$

Note, these are functions so you need to call them with a specific value of theta.
End of explanation
"""
H.dag()*H  # inner product <H|H>; equals 1 for a normalized state
"""
Explanation: Example 1) Check that the $|H\rangle$ state is normalized
End of explanation
"""
# IPython '?' help syntax — only valid inside IPython/Jupyter:
np.sin?
"""
Explanation: To show more information on an object, use the question mark after the function or object:
End of explanation
"""
psi = Qobj([[1+1j],[2-1j]])  # a ket with complex amplitudes (not normalized)
psi
psi.dag()  # .dag() is the adjoint (conjugate transpose): ket -> bra
psi.dag().dag()  # taking the adjoint twice returns the original ket
"""
Explanation: Example 2) Converting from ket to bra:
End of explanation
"""
InsightLab/data-science-cookbook
2020/05-geographic-information-system/Notebook_Network_Analysis.ipynb
mit
import osmnx as ox import matplotlib.pyplot as plt %matplotlib inline # Specify the name that is used to seach for the data place_name = "Brasil, Ceará, Fortaleza" # Fetch OSM street network from the location graph = ox.graph_from_place(place_name) type(graph) """ Explanation: Recuperando dados do OpenStreetMap O que é o OpenStreetMap? O OpenStreetMap (OSM) é um conjunto de dados que visa criar um mapa editável gratuito do mundo, contendo muitas informações sobre o nosso ambiente, ele é um projeto colaborativo global (crowd sourced). No OSM podemos obter dados, por exemplo, sobre ruas, edifícios e serviços. O OSM tem uma grande base de usuários com mais de 4 milhões e mais de um milhão de contribuintes que atualizam ativamente o banco de dados do OSM com 3 milhões de mudanças por dia. No total, o OSM contém mais de 4 bilhões de nós que formam a base do mundo mapeado digitalmente, e que o OSM oferece (estatísticas de novembro de 2017). O OpenStreetMap não é usado apenas para integrar os mapas do OSM como mapas em segundo plano para visualizações ou mapas on-line, mas também para muitas outras finalidades, como roteamento, geocodificação , educação e pesquisa. O OSM é também amplamente utilizado para realizar planejamentos de respostas humanitárias, por exemplo, em áreas de crise (após desastres naturais) e para fomentar o desenvolvimento econômico (veja mais no site Humanitarian OpenStreetMap Team (HOTOSM)). OSMnx Uma outroa biblioteca com muitas funcionalidades para GIS em Python é o OSMnx que pode ser usado para recuperar, construir, analisar e visualizar redes de ruas do OpenStreetMap, e também recuperar dados sobre pontos de interesse, como restaurantes e escolas. Também é fácil conduzir o roteamento de rede com base em caminhada, ciclismo ou direção, combinando as funcionalidades do OSMnx com um pacote de grafos do python chamado NetworkX. 
Fazer o download e visualizar os dados do OpenStreetMap com o OSMnx Um dos recursos mais úteis que o OSMnx fornece é uma maneira fácil para recuperar dados do OpenStreetMap (usando API OverPass). Neste tutorial, aprenderemos a baixar e visualizar dados do OSM cobrindo uma área específica de interesse. O OSMnx torna isso muito fácil, pois permite especificar um endereço para recuperar os dados do OpenStreetMap em torno dessa área. Na verdade, o OSMnx usa a mesma API de geocodificação da Nominatim para fazer isso, o que testamos durante o a aula de geocoding. Vamos recuperar os dados do OpenStreetMap (OSM) especificando "Brasil, Ceará, Fortaleza" como o endereço onde os dados devem ser baixados. Podemos baixar o pacote OSMnx no nosso ambiente com o seguitne comando: conda install -c conda-forge osmnx End of explanation """ # Plot the streets fig, ax = ox.plot_graph(graph) """ Explanation: Como podemos ver os dados que recuperamos é um objeto de dados especial chamado networkx.classes.multidigraph.MultiDiGraph. Um DiGraph é um tipo de dados que armazena vértices e arestas com dados ou atributos opcionais. O que podemos ver aqui é que esse tipo de dado pertence a um módulo Python chamado networkx que pode ser usado para criar, manipular e estudar a estrutura, a dinâmica e possui funções especificas para análise em rede de grafo. O módulo Networkx contém algoritmos que podem ser usados para calcular caminhos mais curtos ao longo das redes rodoviárias usando, por exemplo, o algoritmo de Dijkstra ou A* algorithm. Vamos ver como é a nossa rede de ruas. É fácil visualizar o gráfico com osmnx com a função plot_graph(). 
A função utiliza o Matplotlib para visualizar os dados, portanto, como resultado, ele retorna uma figura de matplotlib: End of explanation """ # Specify the name that is used to seach for the data place_name = "Brasil, Ceará, Fortaleza" # Fetch OSM street network from the location graph_drive = ox.graph_from_place(place_name, network_type='drive') fig, ax = ox.plot_graph(graph_drive) """ Explanation: Agora podemos ver que nosso gráfico contém os vértices ou nós (círculos azuis) e as arestas (linhas cinzas) que conectam esses nós entre si. Também é possível recuperar outros tipos de recursos de dados do OSM com osmnx, como edifícios ou pontos de interesse (POIs). Análise de rede em Python Encontrar um caminho mais curto usando uma rede de ruas específica, é um problema comum em GIS. Isto ferramente de extrema importância no mundo moderno e está presente em diversas aplicações, por exemplo, os aplicativos de GPS, que são aplicações usadas diariamente para encontrar o roteamento usando algoritmos específicos para encontrar a rota ideal entre dois (ou múltiplos) pontos. O módulo Networkx fornece muitas ferramentas que podem ser usadas para analisar redes de várias maneiras diferentes. Também contém algoritmos como o algoritmo deDijkstra ou A* que são comumente usados para encontrar caminhos mais curtos ao longo da rede de transporte. Em seguida, testaremos as funcionalidades de roteamento do osmnx encontrando um caminho mais curto entre dois pontos baseados em estradas possuem acesso para carros. Vamos primeiro fazer o download dos dados do OSM de Fortaleza, mas desta vez incluir apenas os segmentos da rua que são tranquilos. No omsnx é possível recuperar apenas as ruas que são dirigíveis especificando ''drive'' no parâmetro network_type que pode ser usado para especificar que tipos de ruas são recuperadas do OpenStreetMap (outras possibilidades são walk e bike). 
End of explanation """ # Retrieve only edges from the graph nodes, edges = ox.graph_to_gdfs(graph_drive) # Check the data type print('Data type of edges: ', type(edges)) # Check columns print(edges.columns) edges.head(5) """ Explanation: Agora que foi recuperado apenas as ruas onde é possível dirigir com um carro. Vamos confirmar isso dando uma olhada nos atributos da rede de ruas. A maneira mais fácil de fazer isso é converter o gráfico (nós e arestas) em GeoDataFrames (geopandas). A conversão de um gráfico em um GeoDataFrame pode ser feita utilizando a função graph_to_gdfs(), que retorna os nodes (vértices) e as edges (arestas). End of explanation """ # plot only the road's network edges.plot() """ Explanation: Então podemos observar que temos muitas colunas em nosso GeoDataFrame. A maioria das colunas são auto-explicativas, mas a tabela a seguir descreve todas elas. | Column | Description | Data type | |------------------------------------------------------------|-----------------------------|-------------------| | bridge | Bridge feature | boolean | | geometry | Geometry of the feature | Shapely.geometry | | highway | Tag for roads (road type) | str / list | | lanes | Number of lanes | int (or nan) | | lenght | Length of feature (meters) | float | | maxspeed| maximum legal speed limit | int /list | | name | Name of the (street) element| str (or nan) | | oneway | One way road | boolean | | osmid | Unique ids for the element | list | | u | The first node of edge | int | | v | The last node of edge | int | Como os nosso dados de nós e arestas são GeoDataFrames, podemos visualizar eles separadamente, utilizando apenas a função plot(). 
End of explanation
"""
# plot only the road's network
edges.plot()
"""
Explanation: Análise de menor caminho
Vamos agora calcular o caminho mais curto entre dois pontos. Primeiro, precisamos especificar os locais de origem e destino de nossa rota. O nosso objetivo será obter a rota e a distância percorrida em metros, do departamento de computação para o Shopping Benfica, utilizando a rede rodoviária.
Primeiro precisamos definir a localização da nossa origem e destino. Para isso podemos usar o OpenStreetMap ou o Google Maps para obter as coordenadas.
End of explanation
"""
from shapely.geometry import Point

# approximate coord of computer department: -3.746088, -38.574236
# approximate coord of benfica shopping: -3.739631, -38.540785
origin = Point(-3.746088, -38.574236)
destiny = Point(-3.739631, -38.540785)
"""
Explanation: Em seguida precisamos de uma rede de ruas que apresente pelo menos os pontos de origem e destino. A alternativa mais simples seria carregar toda a rede de ruas da cidade de Fortaleza, porém isso acrescentaria um custo de computação a mais. Utilizando o nosso conhecimento, vamos diminuir a rede de busca.
Visualizando as divisões administrativas das AIS, visto na aula passada, podemos ver que o campus do Pici está na AIS 6 e o shopping na AIS 5, e que as duas são adjacentes.
O módulo OSMnx possui diversas opções para carregar dados do OSM, e uma delas é passar um polígono da área desejada em que se deseja obter o grafo. Vamos utilizar as divisões administrativas das AIS para obter um grafo reduzido para a nossa análise. 
End of explanation """ graph = ox.graph_from_polygon(poly, network_type='drive') fig, ax = ox.plot_graph(graph) """ Explanation: Agora vamos utilizar o OSMnx para obter o grafo dentro do nosso polygon. Para isso podemos utilizar a função graph_from_polygon(). Lembrando que queremos somente a rede de carros, então usaremos o parâmetro network_type. End of explanation """ # Get the x and y coordinates origin_xy = (origin.x, origin.y) dest_xy = (destiny.x, destiny.y) # Find the closest origin and target nodes from the graph (the ids of them) orig_node = ox.get_nearest_node(graph, origin_xy, method='euclidean') target_node = ox.get_nearest_node(graph, dest_xy, method='euclidean') # Show the results print(orig_node) print(target_node) """ Explanation: Para calcular a menor rota no grafo precisamos que os nodes de origem e destino estejam presentes no grafo. Para isso vamos calcular o nó mais próximo do nosso ponto de origem e do nosso ponto de destino. End of explanation """ import networkx as nx # Calculate the shortest path route = nx.shortest_path(G=graph, source=orig_node, target=target_node, weight='length') # Show what we have print(route) """ Explanation: Agora que já temos os nós de origem e destino, vamos calcular o menor caminho na rede de ruas utilizando o algoritmo de Dijkstra. Para isso podemos usar a função shortest_path do networkx. End of explanation """ # Plot the shortest path fig, ax = ox.plot_graph_route(graph, route, orig_dest_size=20) """ Explanation: Como resultado, obtemos uma lista de todos os nós que estão no caminho mais curto. 
Poderíamos extrair os locais desses nós do nodes_proj GeoDataFrame e criar uma LineString dos pontos, mas, felizmente, o OSMnx pode fazer isso para nós e podemos traçar o caminho mais curto usando a funçãoplot_graph_route(): End of explanation """ # Retrieve only edges from the graph nodes, edges = ox.graph_to_gdfs(graph) # Get the nodes along the shortest path route_nodes = nodes.loc[route] route_nodes.head() """ Explanation: Para calcular o tamanho da nossa rota em metros, vamos primeiro criar um LineString da nossa rota, depois mudar a e projeção e finalmente usaremos o método length Para criar o nosso LineString precisamos das informações de latitude e longitude, porém só temos o id dos nós. Então primeiro vamos filtrar dos nossos dados o nós da nossa rota pelo id. End of explanation """ # Change projection of the data new_epsg = 31984 # more accurate for northeast region route_nodes_proj = route_nodes.to_crs(epsg=new_epsg) route_nodes_proj.head() """ Explanation: Como pode ser visto as coordenadas estão na projeção Mercator, que é a mesma utilizada pelo Google (de onde definimos as nossas coordenadas). Para podermos calcular a distância em metros, vamos primeiro transformar a projeção dos dados para a projeção SIRGAS 2000 / UTM zone 24S (EPSG 31984) End of explanation """ from shapely.geometry import LineString, Point # Create a geometry for the shortest path route_line = LineString(list(route_nodes_proj.geometry.values)) print("tamanho da rota em metros: {:.3f} m".format(route_line.length)) route_line """ Explanation: Finalmente podemos criar o nosso LineString usando a projeção apropriada para a nossa tarefa. End of explanation """
michrawson/nyu_ml_lectures
notebooks/07.1 Case Study - Large Scale Text Classification.ipynb
cc0-1.0
from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer(min_df=1) vectorizer.fit([ "The cat sat on the mat.", ]) vectorizer.vocabulary_ """ Explanation: Large Scale Text Classification for Sentiment Analysis Scalability Issues The sklearn.feature_extraction.text.CountVectorizer and sklearn.feature_extraction.text.TfidfVectorizer classes suffer from a number of scalability issues that all stem from the internal usage of the vocabulary_ attribute (a Python dictionary) used to map the unicode string feature names to the integer feature indices. The main scalability issues are: Memory usage of the text vectorizer: the all the string representations of the features are loaded in memory Parallelization problems for text feature extraction: the vocabulary_ would be a shared state: complex synchronization and overhead Impossibility to do online or out-of-core / streaming learning: the vocabulary_ needs to be learned from the data: its size cannot be known before making one pass over the full dataset To better understand the issue let's have a look at how the vocabulary_ attribute work. 
At fit time the tokens of the corpus are uniquely indentified by a integer index and this mapping stored in the vocabulary: End of explanation """ X = vectorizer.transform([ "The cat sat on the mat.", "This cat is a nice cat.", ]).toarray() print(len(vectorizer.vocabulary_)) print(vectorizer.get_feature_names()) print(X) """ Explanation: The vocabulary is used at transform time to build the occurrence matrix: End of explanation """ vectorizer = CountVectorizer(min_df=1) vectorizer.fit([ "The cat sat on the mat.", "The quick brown fox jumps over the lazy dog.", ]) vectorizer.vocabulary_ """ Explanation: Let's refit with a slightly larger corpus: End of explanation """ X = vectorizer.transform([ "The cat sat on the mat.", "This cat is a nice cat.", ]).toarray() print(len(vectorizer.vocabulary_)) print(vectorizer.get_feature_names()) print(X) """ Explanation: The vocabulary_ is the (logarithmically) growing with the size of the training corpus. Note that we could not have built the vocabularies in parallel on the 2 text documents as they share some words hence would require some kind of shared datastructure or synchronization barrier which is complicated to setup, especially if we want to distribute the processing on a cluster. With this new vocabulary, the dimensionality of the output space is now larger: End of explanation """ import os sentiment140_folder = os.path.join('datasets', 'sentiment140') training_csv_file = os.path.join(sentiment140_folder, 'training.1600000.processed.noemoticon.csv') testing_csv_file = os.path.join(sentiment140_folder, 'testdata.manual.2009.06.14.csv') """ Explanation: The Sentiment 140 Dataset To illustrate the scalability issues of the vocabulary-based vectorizers, let's load a more realistic dataset for a classical text classification task: sentiment analysis on tweets. The goal is to tell apart negative from positive tweets on a variety of topics. 
Assuming that the ../fetch_data.py script was run successfully the following files should be available: End of explanation """ from helpers import read_sentiment_csv %time text_train_all, target_train_all = read_sentiment_csv(training_csv_file, max_count=200000) len(text_train_all), len(target_train_all) """ Explanation: Those files were downloaded from the research archive of the http://www.sentiment140.com/ project. The first file was gathered using the twitter streaming API by running stream queries for the positive ":)" and negative ":(" emoticons to collect tweets that are explicitly positive or negative. To make the classification problem non-trivial, the emoticons were stripped out of the text in the CSV files. Let's parse the CSV files and load everything in memory. As loading everything can take up to 2GB, let's limit the collection to 100K tweets of each (positive and negative) out of the total of 1.6M tweets. End of explanation """ for text in text_train_all[:3]: print(text + "\n") print(target_train_all[:3]) """ Explanation: Let's display the first samples: End of explanation """ for text in text_train_all[-3:]: print(text + "\n") print(target_train_all[-3:]) """ Explanation: A polarity of "-1" means negative while a polarity of "1" means positive. 
All the positive tweets are at the end of the file: End of explanation """ from sklearn.cross_validation import train_test_split text_train_small, text_validation, target_train_small, target_validation = train_test_split( text_train_all, np.array(target_train_all), test_size=.5, random_state=42) len(text_train_small) (target_train_small == -1).sum(), (target_train_small == 1).sum() len(text_validation) (target_validation == -1).sum(), (target_validation == 1).sum() """ Explanation: Let's split the training CSV file into a smaller training set and a validation set with 100k random tweets each: End of explanation """ text_test_all, target_test_all = read_sentiment_csv(testing_csv_file) len(text_test_all), len(target_test_all) """ Explanation: Let's open the manually annotated tweet files. The evaluation set also has neutral tweets with a polarity of "2" which we ignore. We can build the final evaluation set with only the positive and negative tweets of the evaluation CSV file: End of explanation """ from sklearn.utils.murmurhash import murmurhash3_bytes_u32 # encode for python 3 compatibility for word in "the cat sat on the mat".encode("utf-8").split(): print("{0} => {1}".format( word, murmurhash3_bytes_u32(word, 0) % 2 ** 20)) """ Explanation: The Hashing Trick Remember the bag of word representation using a vocabulary based vectorizer: <img src="figures/bag_of_words.svg" width="100%"> To workaround the limitations of the vocabulary-based vectorizers, one can use the hashing trick. 
Instead of building and storing an explicit mapping from the feature names to the feature indices in a Python dict, we can just use a hash function and a modulus operation: <img src="figures/hashing_vectorizer.svg" width="100%"> End of explanation """ from sklearn.feature_extraction.text import HashingVectorizer h_vectorizer = HashingVectorizer(encoding='latin-1') h_vectorizer """ Explanation: This mapping is completely stateless and the dimensionality of the output space is explicitly fixed in advance (here we use a modulo 2 ** 20 which means roughly 1M dimensions). The makes it possible to workaround the limitations of the vocabulary based vectorizer both for parallelizability and online / out-of-core learning. The HashingVectorizer class is an alternative to the TfidfVectorizer class with use_idf=False that internally uses the murmurhash hash function: End of explanation """ analyzer = h_vectorizer.build_analyzer() analyzer('This is a test sentence.') """ Explanation: It shares the same "preprocessor", "tokenizer" and "analyzer" infrastructure: End of explanation """ %time X_train_small = h_vectorizer.transform(text_train_small) """ Explanation: We can vectorize our datasets into a scipy sparse matrix exactly as we would have done with the CountVectorizer or TfidfVectorizer, except that we can directly call the transform method: there is no need to fit as HashingVectorizer is a stateless transformer: End of explanation """ X_train_small """ Explanation: The dimension of the output is fixed ahead of time to n_features=2 ** 20 by default (nearly 1M features) to minimize the rate of collision on most classification problem while having reasonably sized linear models (1M weights in the coef_ attribute): End of explanation """ from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.pipeline import Pipeline h_pipeline = Pipeline(( ('vec', HashingVectorizer(encoding='latin-1')), ('clf', PassiveAggressiveClassifier(C=1, n_iter=1)), )) %time 
h_pipeline.fit(text_train_small, target_train_small).score(text_validation, target_validation) """ Explanation: As only the non-zero elements are stored, n_features has little impact on the actual size of the data in memory. We can combine the hashing vectorizer with a Passive-Aggressive linear model in a pipeline: End of explanation """ h_pipeline.score(text_test_all, target_test_all) """ Explanation: Let's check that the score on the validation set is reasonably in line with the set of manually annotated tweets: End of explanation """ from sklearn.feature_extraction.text import TfidfVectorizer vocabulary_vec = TfidfVectorizer(encoding='latin-1', use_idf=False) vocabulary_pipeline = Pipeline(( ('vec', vocabulary_vec), ('clf', PassiveAggressiveClassifier(C=1, n_iter=1)), )) %time vocabulary_pipeline.fit(text_train_small, target_train_small).score(text_validation, target_validation) """ Explanation: As the text_train_small dataset is not that big we can still use a vocabulary based vectorizer to check that the hashing collisions are not causing any significant performance drop on the validation set (WARNING this is twice as slow as the hashing vectorizer version, skip this cell if your computer is too slow): End of explanation """ len(vocabulary_vec.vocabulary_) """ Explanation: We get almost the same score but almost twice as slower with also a big, slow to (un)pickle datastructure in memory: End of explanation """ from random import Random class InfiniteStreamGenerator(object): """Simulate random polarity queries on the twitter streaming API""" def __init__(self, texts, targets, seed=0, batchsize=100): self.texts_pos = [text for text, target in zip(texts, targets) if target > 0] self.texts_neg = [text for text, target in zip(texts, targets) if target <= 0] self.rng = Random(seed) self.batchsize = batchsize def next_batch(self, batchsize=None): batchsize = self.batchsize if batchsize is None else batchsize texts, targets = [], [] for i in range(batchsize): # Select 
the polarity randomly target = self.rng.choice((-1, 1)) targets.append(target) # Combine 2 random texts of the right polarity pool = self.texts_pos if target > 0 else self.texts_neg text = self.rng.choice(pool) + " " + self.rng.choice(pool) texts.append(text) return texts, targets infinite_stream = InfiniteStreamGenerator(text_train_small, target_train_small) texts_in_batch, targets_in_batch = infinite_stream.next_batch(batchsize=3) for t in texts_in_batch: print(t + "\n") targets_in_batch """ Explanation: More info and reference for the original papers on the Hashing Trick in the following site as well as a description specific to language here. Out-of-Core learning Out-of-Core learning is the task of training a machine learning model on a dataset that does not fit in the main memory. This requires the following conditions: a feature extraction layer with fixed output dimensionality knowing the list of all classes in advance (in this case we only have positive and negative tweets) a machine learning algorithm that supports incremental learning (the partial_fit method in scikit-learn). Let us simulate an infinite tweeter stream that can generate batches of annotated tweet texts and there polarity. 
We can do this by recombining randomly pairs of positive or negative tweets from our fixed dataset: End of explanation """ n_batches = 1000 validation_scores = [] training_set_size = [] # Build the vectorizer and the classifier h_vectorizer = HashingVectorizer(encoding='latin-1') clf = PassiveAggressiveClassifier(C=1) # Extract the features for the validation once and for all X_validation = h_vectorizer.transform(text_validation) classes = np.array([-1, 1]) n_samples = 0 for i in range(n_batches): texts_in_batch, targets_in_batch = infinite_stream.next_batch() n_samples += len(texts_in_batch) # Vectorize the text documents in the batch X_batch = h_vectorizer.transform(texts_in_batch) # Incrementally train the model on the new batch clf.partial_fit(X_batch, targets_in_batch, classes=classes) if n_samples % 100 == 0: # Compute the validation score of the current state of the model score = clf.score(X_validation, target_validation) validation_scores.append(score) training_set_size.append(n_samples) if i % 100 == 0: print("n_samples: {0}, score: {1:.4f}".format(n_samples, score)) """ Explanation: We can now use our infinte tweet source to train an online machine learning algorithm using the hashing vectorizer. Note the use of the partial_fit method of the PassiveAggressiveClassifier instance in place of the traditional call to the fit method that needs access to the full training set. From time to time, we evaluate the current predictive performance of the model on our validation set that is guaranteed to not overlap with the infinite training set source: End of explanation """ plt.plot(training_set_size, validation_scores) plt.ylim(0.5, 1) plt.xlabel("Number of samples") plt.ylabel("Validation score") """ Explanation: We can now plot the collected validation score values, versus the number of samples generated by the infinite source and feed to the model: End of explanation """
atlury/deep-opencl
DL0110EN/5.1.1dropoutPredictin.ipynb
lgpl-3.0
import torch import matplotlib.pyplot as plt import torch.nn as nn import torch.nn.functional as F import numpy as np from matplotlib.colors import ListedColormap """ Explanation: <div class="alert alert-block alert-info" style="margin-top: 20px"> <a href="http://cocl.us/pytorch_link_top"><img src = "http://cocl.us/Pytorch_top" width = 950, align = "center"></a> <img src = "https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png" width = 200, align = "center"> <h1 align=center><font size = 5>Using Dropout for Classification </font></h1> # Table of Contents in this lab, you will see how adding dropout to your model will decrease overfitting. <div class="alert alert-block alert-info" style="margin-top: 20px"> <li><a href="#ref0">Make Some Data</a></li> <li><a href="#ref1">Create the Model and Cost Function the Pytorch way</a></li> <li><a href="#ref2">Batch Gradient Descent</a></li> <br> <p></p> Estimated Time Needed: <strong>20 min</strong> </div> <hr> Import all the libraries that you need for this lab: End of explanation """ def plot_decision_regions_3class(data_set,model=None): cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA','#00AAFF']) cmap_bold = ListedColormap(['#FF0000', '#00FF00','#00AAFF']) X=data_set.x.numpy() y=data_set.y.numpy() h = .02 x_min, x_max = X[:, 0].min()-0.1 , X[:, 0].max()+0.1 y_min, y_max = X[:, 1].min()-0.1 , X[:, 1].max() +0.1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h),np.arange(y_min, y_max, h)) newdata=np.c_[xx.ravel(), yy.ravel()] #XX=torch.torch.Tensor(newdata) #_,yhat=torch.max(model(XX),1) #yhat=yhat.numpy().reshape(xx.shape) Z=data_set.fun(newdata).flatten() f=np.zeros(Z.shape) f[Z>0]=1 f=f.reshape(xx.shape) if model!=None: model.eval() XX=torch.torch.Tensor(newdata) _,yhat=torch.max(model(XX),1) yhat=yhat.numpy().reshape(xx.shape) plt.pcolormesh(xx, yy, yhat, cmap=cmap_light) plt.contour(xx, yy, f, cmap=plt.cm.Paired) else: plt.contour(xx, yy, f, cmap=plt.cm.Paired) plt.pcolormesh(xx, yy, f, cmap=cmap_light) 
plt.title("decision region vs True decision boundary") plt.legend() """ Explanation: Use this function only for plotting: End of explanation """ def accuracy(model,data_set): _,yhat=torch.max(model(data_set.x),1) return (yhat==data_set.y).numpy().mean() """ Explanation: Use this function to calculate accuracy: End of explanation """ from torch.utils.data import Dataset, DataLoader class Data(Dataset): def __init__(self,N_SAMPLES = 1000,noise_std=0.1,train=True): a=np.matrix([-1,1,2,1,1,-3,1]).T self.x = np.matrix(np.random.rand(N_SAMPLES,2)) self.f=np.array(a[0]+(self.x)*a[1:3]+np.multiply(self.x[:,0], self.x[:,1])*a[4]+np.multiply(self.x, self.x)*a[5:7]).flatten() self.a=a self.y=np.zeros(N_SAMPLES) self.y[self.f> 0]=1 self.y=torch.from_numpy(self.y).type(torch.LongTensor) self.x=torch.from_numpy(self.x).type(torch.FloatTensor) self.x = self.x+noise_std*torch.randn(self.x.size()) self.f=torch.from_numpy(self.f) self.a=a if train==True: torch.manual_seed(1) self.x = self.x+noise_std*torch.randn(self.x.size()) torch.manual_seed(0) def __getitem__(self,index): return self.x[index],self.y[index] def __len__(self): return self.len def plot(self): X=data_set.x.numpy() y=data_set.y.numpy() h = .02 x_min, x_max = X[:, 0].min() , X[:, 0].max() y_min, y_max = X[:, 1].min(), X[:, 1].max() xx, yy = np.meshgrid(np.arange(x_min, x_max, h),np.arange(y_min, y_max, h)) Z=data_set.fun(np.c_[xx.ravel(), yy.ravel()]).flatten() f=np.zeros(Z.shape) f[Z>0]=1 f=f.reshape(xx.shape) plt.title('True decision boundary and sample points with noise ') plt.plot(self.x[self.y==0,0].numpy(),self.x[self.y==0,1].numpy(),'bo',label='y=0' ) plt.plot(self.x[self.y==1,0].numpy(), self.x[self.y==1,1].numpy(),'ro',label='y=1' ) plt.contour(xx, yy, f, cmap=plt.cm.Paired ) plt.xlim(0,1) plt.ylim(0,1) plt.legend() def fun(self,x): x=np.matrix(x) out=np.array(self.a[0]+(x)*self.a[1:3]+np.multiply(x[:,0], x[:,1])*self.a[4]+np.multiply(x, x)*self.a[5:7]) out=np.array(out) return out """ Explanation: <a 
id="ref0"></a> <h2 align=center>Get Some Data </h2> Create a nonlinearly separable dataset: End of explanation """ data_set=Data(noise_std=0.1) data_set.plot() """ Explanation: Create a dataset object: End of explanation """ torch.manual_seed(0) validation_set=Data(train=False) """ Explanation: Get some validation data: End of explanation """ class Net(nn.Module): def __init__(self,in_size,n_hidden,out_size,p=0): super(Net,self).__init__() self.drop=nn.Dropout(p=p) self.linear1=nn.Linear(in_size,n_hidden) self.linear2=nn.Linear(n_hidden,n_hidden) self.linear3=nn.Linear(n_hidden,out_size) def forward(self,x): x=F.relu(self.linear1(x)) x=self.drop(x) x=F.relu(self.linear2(x)) x=self.drop(x) x=self.linear3(x) return x """ Explanation: <a id="ref1"></a> <h2 align=center>Create the Model, Optimizer, and Total Loss Function (cost)</h2> Create a custom module with three layers. <code>in_size</code> is the size of the input features, <code>n_hidden</code> is the size of the layers, and <code>out_size</code> is the size. <code>p</code> is the dropout probability. The default is 0, that is, no dropout. End of explanation """ model=Net(2,300,2) model_drop=Net(2,300,2,p=0.5) """ Explanation: Create two model objects: model had no dropout and model_drop has a dropout probability of 0.5: End of explanation """ model_drop.train() """ Explanation: <a id="ref2"></a> <h2 align=center>Train the Model via Mini-Batch Gradient Descent </h2> Set the model using dropout to training mode; this is the default mode, but it's a good practice: End of explanation """ optimizer_ofit = torch.optim.Adam(model.parameters(), lr=0.01) optimizer_drop = torch.optim.Adam(model_drop.parameters(), lr=0.01) criterion = torch.nn.CrossEntropyLoss() """ Explanation: Train the model by using the Adam optimizer. See the unit on other optimizers. 
Use the Cross Entropy Loss: End of explanation """ LOSS={} LOSS['training data no dropout']=[] LOSS['validation data no dropout']=[] LOSS['training data dropout']=[] LOSS['validation data dropout']=[] """ Explanation: Initialize a dictionary that stores the training and validation loss for each model: End of explanation """ epochs=500 for epoch in range(epochs): #make a prediction for both models yhat = model(data_set.x) yhat_drop = model_drop(data_set.x) #calculate the lossf or both models loss = criterion(yhat, data_set.y) loss_drop = criterion(yhat_drop, data_set.y) #store the loss for both the training and validation data for both models LOSS['training data no dropout'].append(loss.item()) LOSS['validation data no dropout'].append(criterion(model(validation_set.x), validation_set.y).item()) LOSS['training data dropout'].append(loss_drop.item()) model_drop.eval() LOSS['validation data dropout'].append(criterion(model_drop(validation_set.x), validation_set.y).item()) model_drop.train() #clear gradient optimizer_ofit.zero_grad() optimizer_drop.zero_grad() #Backward pass: compute gradient of the loss with respect to all the learnable parameters loss.backward() loss_drop.backward() #the step function on an Optimizer makes an update to its parameters optimizer_ofit.step() optimizer_drop.step() """ Explanation: Run 500 iterations of batch gradient decent: End of explanation """ model_drop.eval() """ Explanation: Set the model with dropout to evaluation mode: End of explanation """ accuracy(model,validation_set) """ Explanation: Test the model without dropout on the validation data: End of explanation """ accuracy(model_drop,validation_set) """ Explanation: Test the model with dropout on the validation data: End of explanation """ plot_decision_regions_3class(data_set) """ Explanation: You see that the model with dropout performs better on the validation data. 
Plot the decision boundary and the prediction of the networks in different colors: true function End of explanation """ plot_decision_regions_3class(data_set,model) """ Explanation: model without dropout End of explanation """ plot_decision_regions_3class(data_set,model_drop) """ Explanation: model with dropout End of explanation """ plt.figure(figsize=(6.1, 10)) for key, value in LOSS.items(): plt.plot(np.log(np.array(value)),label=key) plt.legend() plt.xlabel("iterations") plt.ylabel("Log of cost or total loss") """ Explanation: You can see that the model using dropout does better at tracking the function that generated the data. Plot out the loss for the training and validation data on both models: End of explanation """
nick-youngblut/SIPSim
ipynb/bac_genome/n1147/.ipynb_checkpoints/atomIncorp_taxaIncorp_HMW-HR-SIP_run1-checkpoint.ipynb
mit
import os import glob import itertools import nestly %load_ext rpy2.ipython %load_ext pushnote %%R library(ggplot2) library(dplyr) library(tidyr) library(gridExtra) """ Explanation: Goal Follow-up to: atomIncorp_taxaIncorp Determining the effect of 'heavy' BD window (number of windows & window sizes) on HR-SIP accuracy Notes: using hierarchical multi-window method heaviest window first all non-incorporators found for that window are used for next window this repeats for all windows global adjustment of p-values (should be just one test per taxon) Variable parameters: 'heavy' BD window sizes Init End of explanation """ ## min G+C cutoff min_GC = 13.5 ## max G+C cutoff max_GC = 80 ## max G+C shift max_13C_shift_in_BD = 0.036 min_BD = min_GC/100.0 * 0.098 + 1.66 max_BD = max_GC/100.0 * 0.098 + 1.66 max_BD = max_BD + max_13C_shift_in_BD print 'Min BD: {}'.format(min_BD) print 'Max BD: {}'.format(max_BD) """ Explanation: BD min/max End of explanation """ # paths workDir = '/home/nick/notebook/SIPSim/dev/bac_genome1147/' buildDir = os.path.join(workDir, 'atomIncorp_taxaIncorp_HMW-HR-SIP') dataDir = os.path.join(workDir, 'atomIncorp_taxaIncorp') if not os.path.isdir(buildDir): os.makedirs(buildDir) %cd $buildDir # making an experimental design file for qSIP x = range(1,7) y = ['control', 'treatment'] expDesignFile = os.path.join(buildDir, 'qSIP_exp_design.txt') with open(expDesignFile, 'wb') as outFH: for i,z in itertools.izip(x,itertools.cycle(y)): line = '\t'.join([str(i),z]) outFH.write(line + '\n') !head $expDesignFile """ Explanation: Nestly assuming fragments already simulated End of explanation """ # building tree structure nest = nestly.Nest() # varying params nest.add('percIncorp', [0, 15, 25, 50, 100]) nest.add('percTaxa', [1, 5, 10, 25, 50]) nest.add('rep', range(1,11)) ## set params nest.add('abs', ['1e9'], create_dir=False) nest.add('np', [10], create_dir=False) nest.add('Monte_rep', [100000], create_dir=False) nest.add('subsample_dist', ['lognormal'], 
create_dir=False) nest.add('subsample_mean', [9.432], create_dir=False) nest.add('subsample_scale', [0.5], create_dir=False) nest.add('subsample_min', [10000], create_dir=False) nest.add('subsample_max', [30000], create_dir=False) nest.add('min_BD', [min_BD], create_dir=False) nest.add('max_BD', [max_BD], create_dir=False) nest.add('DBL_scaling', [0.5], create_dir=False) nest.add('bandwidth', [0.8], create_dir=False) nest.add('heavy_BD_min', [1.71], create_dir=False) nest.add('heavy_BD_max', [1.75], create_dir=False) nest.add('topTaxaToPlot', [100], create_dir=False) nest.add('padj', [0.1], create_dir=False) nest.add('log2', [0.25], create_dir=False) ### input/output files nest.add('buildDir', [buildDir], create_dir=False) nest.add('exp_design', [expDesignFile], create_dir=False) # building directory tree nest.build(buildDir) # bash file to run bashFile = os.path.join(buildDir, 'SIPSimRun.sh') """ Explanation: Nestly params End of explanation """ files = !find . -name "*.json" dirs = [os.path.split(x)[0] for x in files] srcFiles = ['OTU_abs1e9_PCR_sub_w.txt', 'OTU_abs1e9_PCR_sub_meta.txt', 'BD-shift_stats.txt'] for d in dirs: for f in srcFiles: f1 = os.path.join(dataDir, d, f) f2 = os.path.join(buildDir, d, f) cmd = 'cp -f {} {}'.format(f1, f2) !$cmd """ Explanation: Copying input files End of explanation """ bashFileTmp = os.path.splitext(bashFile)[0] + '_HRSIP_multi.sh' bashFileTmp %%writefile $bashFileTmp #!/bin/bash # phyloseq ## making phyloseq object from OTU table SIPSimR phyloseq_make \ OTU_abs{abs}_PCR_sub_w.txt \ -s OTU_abs{abs}_PCR_sub_meta.txt \ > OTU_abs{abs}_PCR_sub.physeq ## HR SIP pipeline SIPSim HRSIP \ --hier \ --log2 {log2} \ --hypo greater \ --cont 1,3,5 \ --treat 2,4,6 \ -w 1.71-1.75 \ --prefix OTU_abs{abs}_PCR_sub_BD3-1 \ OTU_abs{abs}_PCR_sub.physeq ## HR SIP pipeline SIPSim HRSIP \ --hier \ --log2 {log2} \ --hypo greater \ --cont 1,3,5 \ --treat 2,4,6 \ -w 1.69-1.74,1.73-1.78 \ --prefix OTU_abs{abs}_PCR_sub_BD3-2 \ OTU_abs{abs}_PCR_sub.physeq 
SIPSim HRSIP \ --hier \ --log2 {log2} \ --hypo greater \ --cont 1,3,5 \ --treat 2,4,6 \ -w 1.70-1.73,1.72-1.75,1.74-1.77 \ --prefix OTU_abs{abs}_PCR_sub_BD3-3 \ OTU_abs{abs}_PCR_sub.physeq SIPSim HRSIP \ --hier \ --log2 {log2} \ --hypo greater \ --cont 1,3,5 \ --treat 2,4,6 \ -w 1.69-1.73,1.72-1.76,1.75-1.79 \ --prefix OTU_abs{abs}_PCR_sub_BD3-4 \ OTU_abs{abs}_PCR_sub.physeq !chmod 777 $bashFileTmp !cd $workDir; \ nestrun --template-file $bashFileTmp -d $buildDir --log-file HR-SIP_multi.log -j 10 %pushnote HMW-HR-SIP complete """ Explanation: Hierarchical multi-window HR-SIP End of explanation """ bashFileTmp = os.path.splitext(bashFile)[0] + '_cMtx.sh' bashFileTmp %%writefile $bashFileTmp #!/bin/bash # HR-SIP multiple 'heavy' BD windows SIPSimR DESeq2_confuseMtx \ --libs 2,4,6 \ --padj {padj} \ -o DESeq2_BD3-1-cMtx \ BD-shift_stats.txt \ OTU_abs1e9_PCR_sub_BD3-1_DESeq2 # HR-SIP multiple 'heavy' BD windows SIPSimR DESeq2_confuseMtx \ --libs 2,4,6 \ --padj {padj} \ -o DESeq2_BD3-2-cMtx \ BD-shift_stats.txt \ OTU_abs1e9_PCR_sub_BD3-2_DESeq2 # HR-SIP multiple 'heavy' BD windows SIPSimR DESeq2_confuseMtx \ --libs 2,4,6 \ --padj {padj} \ -o DESeq2_BD3-3-cMtx \ BD-shift_stats.txt \ OTU_abs1e9_PCR_sub_BD3-3_DESeq2 # HR-SIP multiple 'heavy' BD windows SIPSimR DESeq2_confuseMtx \ --libs 2,4,6 \ --padj {padj} \ -o DESeq2_BD3-4-cMtx \ BD-shift_stats.txt \ OTU_abs1e9_PCR_sub_BD3-4_DESeq2 !chmod 777 $bashFileTmp !cd $workDir; \ nestrun --template-file $bashFileTmp -d $buildDir --log-file cMtx.log -j 10 """ Explanation: Making confusion matrices End of explanation """ def agg_cMtx(prefix): # all data #!nestagg delim \ # -d $buildDir \ # -k percIncorp,percTaxa,rep \ # -o $prefix-cMtx_data.txt \ # --tab \ # $prefix-cMtx_data.txt # overall x = prefix + '-cMtx_overall.txt' !nestagg delim \ -d $buildDir \ -k percIncorp,percTaxa,rep \ -o $x \ --tab \ $x # by class x = prefix + '-cMtx_byClass.txt' !nestagg delim \ -d $buildDir \ -k percIncorp,percTaxa,rep \ -o $x \ --tab \ $x 
agg_cMtx('DESeq2_BD3-1') agg_cMtx('DESeq2_BD3-2') agg_cMtx('DESeq2_BD3-3') agg_cMtx('DESeq2_BD3-4') %pushnote HMW-HR-SIP complete! """ Explanation: Aggregating the confusion matrix data End of explanation """ F = os.path.join(buildDir, '*-cMtx_byClass.txt') files = glob.glob(F) files %%R -i files df_byClass = list() for (f in files){ ff = strsplit(f, '/') %>% unlist fff = ff[length(ff)] df_byClass[[fff]] = read.delim(f, sep='\t') } df_byClass = do.call(rbind, df_byClass) df_byClass$file = gsub('\\.[0-9]+$', '', rownames(df_byClass)) df_byClass$method = gsub('-cMtx.+', '', df_byClass$file) rownames(df_byClass) = 1:nrow(df_byClass) df_byClass %>% head(n=3) %%R # renaming method rename = data.frame(method = c('DESeq2_BD3-1', 'DESeq2_BD3-2', 'DESeq2_BD3-3', 'DESeq2_BD3-4'), method_new = c('1.71-1.75','1.69-1.74,1.73-1.78', '1.70-1.73,1.72-1.75,1.74-1.77', '1.69-1.73,1.72-1.76,1.75-1.79')) df_byClass = inner_join(df_byClass, rename, c('method'='method')) %>% select(-method) %>% rename('method' = method_new) df_byClass$method = factor(df_byClass$method, levels=rename$method_new %>% as.vector) df_byClass %>% head(n=3) %%R -w 800 -h 550 # summarize by SIPSim rep & library rep df_byClass.s = df_byClass %>% group_by(method, percIncorp, percTaxa, variables) %>% summarize(mean_value = mean(values), sd_value = sd(values)) # plotting ggplot(df_byClass.s, aes(variables, mean_value, color=method, ymin=mean_value-sd_value, ymax=mean_value+sd_value)) + geom_pointrange(alpha=0.8, size=0.2) + labs(y='Value') + facet_grid(percTaxa ~ percIncorp) + theme_bw() + theme( text = element_text(size=16), axis.title.x = element_blank(), axis.text.x = element_text(angle=45, hjust=1) ) %%R -w 800 -h 600 # summarize by SIPSim rep & library rep vars = c('Balanced Accuracy', 'Sensitivity', 'Specificity') df_byClass.s.f = df_byClass.s %>% filter(variables %in% vars) # plotting ggplot(df_byClass.s.f, aes(variables, mean_value, fill=method, ymin=mean_value-sd_value, ymax=mean_value+sd_value)) + 
#geom_pointrange(alpha=0.8, size=0.2) + geom_bar(stat='identity', position='dodge') + geom_errorbar(stat='identity', position='dodge') + scale_y_continuous(breaks=seq(0, 1, 0.2)) + facet_grid(percTaxa ~ percIncorp) + theme_bw() + theme( text = element_text(size=16), axis.title.x = element_blank(), axis.text.x = element_text(angle=45, hjust=1), axis.title.y = element_blank() ) """ Explanation: --End of simulation-- Plotting results End of explanation """
mzszym/oedes
examples/scl/transient-with-trapping.ipynb
agpl-3.0
%matplotlib inline import matplotlib.pylab as plt from oedes import * init_notebook() """ Explanation: Transient space-charge-limited current with trapping This example shows how to run transient simulation of space-charge-limited diode. It considers a case of investigated in a classical paper. In the reference, an idealized case of time-dependent space-charge-limited current is considered. The diffusion is neglected, and the release of trapped charge carriers is not taken into account. The sample is assumed to be insulating and free of mobile or trapped charges at time zero. The contacts are assumed to be ohmic, and the conduction is unipolar. It is shown that under these assumptions, transient current will have a distinct peak at a time proportional to transit time $t_{peak} \approx 0.786 \frac{L^2}{\mu V}$, with $L$ being the sample thickness, $\mu$ the charge carrier mobility, and $V$ the applied voltage. Remarkably, the position of peak is shown to be almost not affected by the trapping process. This motivates the use of transient SCL experiment as a reliable contact test, and as a method to measure mobility. oedes cannot reproduce Fig. 5 from the reference exactly, because it always includes diffusion for numerical reasons. In this example, the influence of diffusion is reduced by setting temperature to a low value (1 K), and by using relatively high voltage and device thickness. Attempts to further reduce the diffusion will eventually lead to numerical problems. The trapping model model must include detrapping, therefore very deep trap level (-1 eV) with very high concentration are used to simulate "trap only" trapping level. Trapping rate is set corresponding to the trapping time. The features of transient signal can be made more similar to the paper by using more precise integration in time. This reduces the amount of numerical dissipation. End of explanation """ L = 1e-6 voltage = 1e3 mu = 1e-15 epsilon_r = 1. 
params = { 'T': 1, 'electrode0.workfunction': 0, 'electrode1.workfunction': 0, 'hole.energy': 0, 'hole.mu': mu, 'hole.N0': 1e27, 'electrode0.voltage': voltage, 'electrode1.voltage': 0, 'epsilon_r': epsilon_r, 'hole.trap.energy': 1., 'hole.trap.trate': 0., 'hole.trap.N0': 1e30 } """ Explanation: Model and parameters End of explanation """ model = models.std.holeonly(L, traps=['trap']) """ Explanation: Stock unipolar model is used: End of explanation """ t0 = functions.physics.ManyRakavy( mu, voltage, L) / functions.physics.ManyRakavy_t1 j0 = functions.physics.MottGurney(epsilon_r, mu, voltage, L) """ Explanation: Normalization We use the normalization of axes introduced in the reference: End of explanation """ reltol = 1e-5 # decrease to increase quality and runtime """ Explanation: Accuracy Decreasing tolerance below improves the shape of responses, at the expense of increased number of timesteps. End of explanation """ for tau in progressbar([1e100, 5, 2, 1., 0.5, 0.25],desc='tau'): params['hole.trap.trate'] = 1. / (tau * params['hole.trap.N0']) c = context(model) c.transient(params, 5, 1e-1, reltol=reltol) t, j = c.teval('time', 'J') plt.plot(t / t0, j / j0, label=tau) testing.store(j, rtol=1e-5) plt.ylim([0, 1.5]) plt.xlim([0, 5]) plt.xlabel(r'Normalized time [$\frac{L^2}{\mu V}$]') plt.ylabel( r'Normalized current [$\frac{9}{8}\varepsilon \mu \frac{V^2}{L^3}$]') plt.show() """ Explanation: Result End of explanation """
theandygross/HIV_Methylation
Setup/DX_Imports.ipynb
mit
import os if os.getcwd().endswith('Setup'): os.chdir('..') import NotebookImport from Setup.Imports import * from scipy.special import logit logit_adj = lambda df: logit(df.clip(.001, .999)) """ Explanation: Helpers for Finding Differentially Methylated Probes End of explanation """ def boxplot_panel(hit_vec, response_df): """ Draws a series of paired boxplots with the rows of the response_df split according to hit_vec. """ b = response_df.copy() #b.columns = pd.MultiIndex.from_arrays([b.columns, hit_vec.ix[b.columns]]) b = b.T v1, v2 = hit_vec.unique() test = lambda v: Stats.anova(hit_vec, v) res = b.apply(test).T #p = res.p.order() p = res.p b = b.ix[:, p.index] l1 = list(b.ix[ti(hit_vec == v1)].as_matrix().T) l2 = list(b.ix[ti(hit_vec == v2)].as_matrix().T) boxes = [x for t in zip(l1, l2) for x in t] ax1, bp = paired_boxplot(boxes) y_lim = (response_df.T.quantile(.9).max()) * 1.2 pts = [(i * 3.5 + .5, y_lim) for i, n in enumerate(p) if n < .0000001] if len(pts) > 0: s1 = ax1.scatter(*zip(*pts), marker='$**$', label='$p<10^{-5}$', s=200) else: s1 = None pts = [(i * 3.5 + .5, y_lim) for i, n in enumerate(p) if (n < .01) and (n > .0000001)] if len(pts) > 0: s2 = ax1.scatter(*zip(*pts), marker='$*$', label='$p<10^{-2}$', s=30) else: s2 = None #ax1.set_xticklabels(b.columns) ax1.set_xticklabels('') #ax1.set_ybound(-.2,.3) #ax1.legend(bp['boxes'][:2] + [s2, s1], # (v1, v2, '$p<10^{-2}$', '$p<10^{-5}$'), # loc='best', scatterpoints=1) """ Explanation: Couple of minor tweaks End of explanation """ def entropy(p): ''' Entropy of a methylaion vector. Here we assume 50% methylation is random and 0 or 1 constitute an informative measument. ''' q = 1. 
- p s = np.sum((p*np.log(p)) + (q*np.log(q))) / (np.log(.5) * len(p)) s.name = 'Entropy' return s """ Explanation: Some additional functions I should add to my statistics module End of explanation """ def svd_entropy(df): U,S,vH = frame_svd(df) p = S ** 2 / sum(S ** 2) entropy = -1 * sum((p * np.log(p))) / log(len(p)) return entropy def entropy_gain(split, df): df = df.ix[:, split.index] h_all = svd_entropy(df) h_1 = svd_entropy(df.ix[:, ti(split)]) h_0 = svd_entropy(df.ix[:, ti(split==False)]) ratio = h_all - (h_1*split.mean() + h_0*(1-split.mean())) return pd.Series({'gain':ratio, 'h_all': h_all, 'h_0':h_0, 'h_1':h_1}) """ Explanation: Some functions for dealing with entropy of a matrix End of explanation """ def ttest_df(split_vec, df): dmean = df.T.groupby(split_vec).mean().T dvar = df.T.groupby(split_vec).var().T dn = df.T.groupby(split_vec).count().astype(float).T s12 = ((((dn[True] - 1)*dvar[True]) + ((dn[False] - 1)*dvar[False])) / (dn.sum(1) - 2)) ** .5 t = (dmean[True] - dmean[False]) / (s12 * np.sqrt((1/dn[True]) + (1/dn[False]))) t = t.dropna() return t def effect_size(split_vec, df): dmean = df.T.groupby(split_vec).mean().T return pd.concat([dmean[True], dmean[False], dmean[True] - dmean[False]], axis=1, keys=['g1','g2','d']) """ Explanation: Helper functions for diffential expression End of explanation """ from Data.Annotations import read_in_pathways gs, gl = read_in_pathways('/cellar/users/agross/TCGA_Code/TCGA/Extra_Data/c2.cp.v3.0.symbols_edit.csv') gs = pd.DataFrame({p: pd.Series(1, index=s) for p,s in gs.iteritems()}) gs = gs.ix[gl.keys()].fillna(0) gene_sets = gs """ Explanation: Read in Gene Sets I don't like this thing with the paths, hopfully fix later End of explanation """ STORE = HDFS_DIR + 'methylation_annotation_2.h5' islands = pd.read_hdf(STORE, 'islands') locations = pd.read_hdf(STORE, 'locations') other = pd.read_hdf(STORE, 'other') snps = pd.read_hdf(STORE, 'snps') probe_annotations = pd.read_hdf(STORE, 'probe_annotations') """ 
Explanation: Read in Probe Annotations from Data-Store These are parsed out in Compile_Probe_Annoations notebook. End of explanation """ isl = islands.sort(['Islands_Name','Relation_to_Island']).dropna() isl = isl[isl.Islands_Name.isin(ti(isl.Islands_Name.value_counts() > 7))] ot = other.Regulatory_Feature_Name ot = ot[ot.isin(ti(ot.value_counts()> 7))] gb = pd.concat([isl, probe_annotations], axis=1) gb = gb[gb.Gene_Symbol.notnull() & gb.Islands_Name.notnull()] g2 = gb.sort('Islands_Name') top_gene = lambda s: s.Gene_Symbol.value_counts().index[0] island_to_gene = g2.groupby('Islands_Name').apply(top_gene) def map_to_islands(s): ''' s is a Series of measurments on the probe level. ''' on_island = s.groupby(isl.Islands_Name).mean().order() v = pd.concat([island_to_gene, on_island], axis=1).set_index(0)[1] islands_mapped_to_genes = v.groupby(level=0).mean().order() return on_island, islands_mapped_to_genes """ Explanation: Auxilary function to map a data-vector from probes onto CpG Islands I've got some globals going on in here If I keep it, I'm probalby going to have to move this to a class End of explanation """ def island_plot_maker(df, split, islands, ann, colors=None): ''' df: a DataFrame of probe beta values islands: a DataFrame mapping probes to CpG islands and annotations ann: a DataFrame mapping probes to gene annotations and genomic coordinates of probe ''' if colors is None: colors = colors_st groups = split.dropna().unique() assert len(groups) == 2 def f(region): p = ti(islands.Islands_Name == region) p3 = ann.ix[p].join(islands.ix[p]).sort('Genomic_Coordinate') p = p3.index in_island = ti(p3.Relation_to_Island == 'Island') fig, ax = subplots(figsize=(10,4)) for i,g in enumerate(groups): ax.scatter(p3.Genomic_Coordinate, df[ti(split == g)].ix[p].mean(1), color=colors[i], label=g) ax.axvspan(p3.Genomic_Coordinate.ix[in_island[0]] - 30, p3.Genomic_Coordinate.ix[in_island[-1]] + 30, alpha=.2, color=colors[2], label='Island') ax.set_xlabel('Genomic 
Coordinate') ax.set_ylabel('Beta Value') ax.legend(loc='lower right', fancybox=True) prettify_ax(ax) return f """ Explanation: Helper for making CpG island plots End of explanation """ cpg_island = isl.Relation_to_Island == 'Island' dhs_site = other.DHS == 'TRUE' enhancer = other.Enhancer == 'TRUE' gene_body = other.UCSC_RefGene_Group.str.contains('Body') gene_tss = other.UCSC_RefGene_Group.str.contains('TSS') promoter = other.Regulatory_Feature_Group.str.contains('Promoter_Associated') """ Explanation: Create annotation probe sets End of explanation """ p = '/cellar/users/agross/Data/GeneSets/PRC2_Binding/' prc2_probes = pd.read_csv(p + 'mapped_to_methylation_probes.csv', index_col=0) prc2_probes = prc2_probes.sum(1)>2 probe_sets = {'PRC2': prc2_probes, 'CpG Island': cpg_island, 'DHS Site': dhs_site, 'Enhancer': enhancer, 'Gene Body': gene_body, 'TSS': gene_tss, 'Promoter': promoter} """ Explanation: PRC2 probe annotations are initiallized in PRC2 Probes notbook. End of explanation """
omoju/Fundamentals
Data/data_Stats_4_ABTesting.ipynb
gpl-3.0
%pylab inline # Import libraries from __future__ import absolute_import, division, print_function # Ignore warnings import warnings #warnings.filterwarnings('ignore') import sys sys.path.append('tools/') import numpy as np import pandas as pd import scipy.stats as st # Graphing Libraries import matplotlib.pyplot as pyplt import seaborn as sns sns.set_style("white") # Configure for presentation np.set_printoptions(threshold=50, linewidth=50) import matplotlib as mpl mpl.rc('font', size=16) from IPython.display import display def axis_tick_frequency(ax, axis, freq): """The frequency of the y axis tick marks Attributes ---------- ax: matplotlib axis object axis: char eithher 'y' or 'x' freq: int, the integer value of which the range moves """ if axis == 'y': start, end = ax.get_ylim() ax.yaxis.set_ticks(np.arange(start, end, freq)) elif axis == 'x': start, end = ax.get_xlim() ax.xaxis.set_ticks(np.arange(start, end, freq)) else: raise ValueError('{argument} is not a valid axis object'.format(argument=repr(axis))) def sample(num_sample, top, with_replacement=False): """ Create a random sample from a table Attributes --------- num_sample: int top: dataframe with_replacement: boolean Returns a random subset of table index """ df_index = [] lst = np.arange(0, len(top), 1) for i in np.arange(0, num_sample, 1): # pick randomly from the whole table sample_index = np.random.choice(lst) if with_replacement: # store index df_index.append(sample_index) else: # remove the choice that was selected lst = np.setdiff1d(lst,[sample_index]) df_index.append(sample_index) return df_index """ Explanation: Data Sampling A/B Testing End of explanation """ baby_df = pd.read_csv('data/baby.csv') baby_df.head() weight_smoke = baby_df[['Birth Weight', 'Maternal Smoker']] weight_smoke['Maternal Smoker'].value_counts() """ Explanation: Smokers and Nonsmokers Many different analyses has been performed on this random sample of mothers and their newborn infants, but not an analysis looking at the 
data whether the mothers smoked. One of the aims of the study was to see whether maternal smoking was associated with birth weight. End of explanation """ smoker = baby_df['Maternal Smoker'] == True non_smoker = baby_df['Maternal Smoker'] == False df_non_smoker = baby_df.ix[baby_df[non_smoker].index, :] df_non_smoker.columns = [u'Non Smoker Birth Weight', u'Gestational Days', u'Maternal Age', u'Maternal Height', u'Maternal Pregnancy Weight', u'Maternal Smoker'] df_smoker = baby_df.ix[baby_df[smoker].index, :] df_smoker.columns = [u'Smoker Birth Weight', u'Gestational Days', u'Maternal Age', u'Maternal Height', u'Maternal Pregnancy Weight', u'Maternal Smoker'] df_non_smoker['Non Smoker Birth Weight'].plot.hist(bins=np.arange(40, 186, 5), normed=True, alpha = 0.8) df_smoker['Smoker Birth Weight'].plot.hist(bins=np.arange(40, 186, 5), normed=True, alpha = 0.8) pyplt.ylabel("percent per ounce") pyplt.xlabel("Birth Weight (ounce)") pyplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.); """ Explanation: The histogram below displays the distribution of birth weights of the babies of the non-smokers and smokers in the sample. End of explanation """ a = df_non_smoker['Non Smoker Birth Weight'].values b = df_smoker['Smoker Birth Weight'].values # difference in the means a.mean() - b.mean() raw = { 'Maternal Smoker': [False, True], 'Birth Weight mean': [123.085, 113.819] } means_table = pd.DataFrame(raw) means_table statistic, pvalue = st.ttest_ind(a, b) print ('T statistic: %.2f'%statistic,'\nP-value:%.2f'%pvalue) """ Explanation: Both distributions are approximately bell shaped and centered near 120 ounces. The distributions are not identical, of course, which raises the question of whether the difference reflects just chance variation or a difference in the distributions in the population. Null hypothesis: In the population, the distribution of birth weights of babies is the same for mothers who don't smoke as for mothers who do. 
The difference in the sample is due to chance. Alternative hypothesis: The two distributions are different in the population. Test statistic: T test Alternatively, we could use: Test statistic: Birth weight is a quantitative variable, so it is reasonable to use the absolute difference between the means as the test statistic. The observed value of the test statistic is about 9.27 ounces. End of explanation """ import scikits.bootstrap as bootstrap import scipy # compute 95% confidence intervals around the mean CIs = bootstrap.ci(baby_df[['Birth Weight', 'Maternal Smoker']], scipy.mean) print ("Bootstrapped 95% confidence interval around the mean\nLow:", CIs[0], "\nHigh:", CIs[1]) # bootstrap 5000 samples instead of only 1174 CIs = bootstrap.ci(baby_df[['Birth Weight', 'Maternal Smoker']], scipy.mean, n_samples=5000) print ("Bootstrapped 95% confidence interval with 5,000 samples\nLow:", CIs[0], "\nHigh:", CIs[1]) def get_means(df, variable, classes): """ Gets the means of a variable grouped by its class Attributes ------------- df: a pandas dataframe variable: column classes: column (bool) """ class_a = df[classes] == True class_b = df[classes] == False df_class_b = df.ix[df[class_b].index, :] df_class_a = df.ix[df[class_a].index, :] a = df_class_b[variable].values b = df_class_a[variable].values # difference in the means a.mean() - b.mean() raw = { classes: [False, True], variable: [a.mean(), b.mean()] } means_table = pd.DataFrame(raw) return means_table def bootstrap_ci_means(table, variable, classes, repetitions): """Bootstrap approximate 95% confidence interval for the difference between the means of the two classes in the population Attributes ------------- table: a pandas dataframe variable: column classes: column (bool) repetitions: int """ t = table[[variable, classes]] mean_diffs = [] for i in np.arange(repetitions): bootstrap_sampl = table.ix[sample(len(table), table, with_replacement=True), :] m_tbl = get_means(bootstrap_sampl, variable, classes) new_stat 
= m_tbl.ix[0, variable] - m_tbl.ix[1, variable] mean_diffs = np.append(mean_diffs, new_stat) left = np.percentile(mean_diffs, 2.5) right = np.percentile(mean_diffs, 97.5) # Find the observed test statistic means_table = get_means(t, variable, classes) obs_stat = means_table.ix[0, variable] - means_table.ix[1, variable] df = pd.DataFrame() df['Difference Between Means'] = mean_diffs df.plot.hist(bins=20, normed=True) plot([left, right], [0, 0], color='yellow', lw=8); print('Observed difference between means:', obs_stat) print('Approximate 95% CI for the difference between means:') print(left, 'to', right) bootstrap_ci_means(baby_df, 'Birth Weight', 'Maternal Smoker', 5000) """ Explanation: The P-value is very, very small. As a result, we can reject the null hypothesis and conclude that in the population, the distribution of birth weights for babies of mothers who smoke and those that don't smoke are different. Bootstrap Confidence Interval For the Difference Our A/B test has concluded that the two distributions are different, but that's a little unsatisfactory. - How different are they? - Which one has the larger mean? These are natural questions that the test can't answer. Instead of just asking a yes/no question about whether the two distributions are different, we might learn more by not making any hypotheses and simply estimating the difference between the means. The observed difference (nonsmokers −− smokers) was about 9.27 ounces. - The positive sign says that the non-smoking mothers had larger babies on average. But samples could have come out differently due to randomness. To see how different, we have to generate more samples; to generate more samples, we'll use the bootstrap. The bootstrap makes no hypotheses about whether or not the two distributions are the same. It simply replicates the original random sample and computes new values of the statistic. 
The function bootstrap_ci_means returns a bootstrap confidence interval for the difference between the means of the two groups in the population. In our example, the confidence interval would estimate the difference between the average birth weights of babies of mothers who didn't smoke and mothers who did, in the entire population. The function returns an approximate 95% confidence interval for the difference between the two means, using the bootstrap percentile method. End of explanation """ bootstrap_ci_means(baby_df, 'Maternal Age', 'Maternal Smoker', 5000) """ Explanation: This bootstrapped confidence interval tells us that on average non-smoking mothers had babies that weighed between 5.8 to 12.8 ounces larger than their smoking counter parts. Furthermore, because 0 is not included in the confidence interval between the difference in the means, we can tell that this distributions are different. End of explanation """
fonnesbeck/scientific-python-workshop
notebooks/Regression Modeling.ipynb
cc0-1.0
%matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() from scipy.optimize import fmin x = np.array([2.2, 4.3, 5.1, 5.8, 6.4, 8.0]) y = np.array([0.4, 10.1, 14.0, 10.9, 15.4, 18.5]) plt.plot(x,y,'ro') """ Explanation: Regression modeling A general, primary goal of many statistical data analysis tasks is to relate the influence of one variable on another. For example, we may wish to know how different medical interventions influence the incidence or duration of disease, or perhaps a how baseball player's performance varies as a function of age. End of explanation """ sum_of_squares = lambda theta, x, y: np.sum((y - theta[0] - theta[1]*x) ** 2) sum_of_squares([0,1],x,y) b0,b1 = fmin(sum_of_squares, [0,1], args=(x,y)) b0,b1 plt.plot(x, y, 'ro') plt.plot([0,10], [b0, b0+b1*10]) plt.plot(x, y, 'ro') plt.plot([0,10], [b0, b0+b1*10]) for xi, yi in zip(x,y): plt.plot([xi]*2, [yi, b0+b1*xi], 'k:') plt.xlim(2, 9); plt.ylim(0, 20) """ Explanation: We can build a model to characterize the relationship between $X$ and $Y$, recognizing that additional factors other than $X$ (the ones we have measured or are interested in) may influence the response variable $Y$. <div style="font-size: 150%;"> $y_i = f(x_i) + \epsilon_i$ </div> where $f$ is some function, for example a linear function: <div style="font-size: 150%;"> $y_i = \beta_0 + \beta_1 x_i + \epsilon_i$ </div> and $\epsilon_i$ accounts for the difference between the observed response $y_i$ and its prediction from the model $\hat{y_i} = \beta_0 + \beta_1 x_i$. This is sometimes referred to as process uncertainty. We would like to select $\beta_0, \beta_1$ so that the difference between the predictions and the observations is zero, but this is not usually possible. Instead, we choose a reasonable criterion: the smallest sum of the squared differences between $\hat{y}$ and $y$. 
<div style="font-size: 120%;"> $$R^2 = \sum_i (y_i - [\beta_0 + \beta_1 x_i])^2 = \sum_i \epsilon_i^2 $$ </div> Squaring serves two purposes: (1) to prevent positive and negative values from cancelling each other out and (2) to strongly penalize large deviations. Whether the latter is a good thing or not depends on the goals of the analysis. In other words, we will select the parameters that minimize the squared error of the model. End of explanation """ sum_of_absval = lambda theta, x, y: np.sum(np.abs(y - theta[0] - theta[1]*x)) b0,b1 = fmin(sum_of_absval, [0,1], args=(x,y)) print('\nintercept: {0:.2}, slope: {1:.2}'.format(b0,b1)) plt.plot(x, y, 'ro') plt.plot([0,10], [b0, b0+b1*10]) """ Explanation: Minimizing the sum of squares is not the only criterion we can use; it is just a very popular (and successful) one. For example, we can try to minimize the sum of absolute differences: End of explanation """ sum_squares_quad = lambda theta, x, y: np.sum((y - theta[0] - theta[1]*x - theta[2]*(x**2)) ** 2) b0,b1,b2 = fmin(sum_squares_quad, [1,1,-1], args=(x,y)) print('\nintercept: {0:.2}, x: {1:.2}, x2: {2:.2}'.format(b0,b1,b2)) plt.plot(x, y, 'ro') xvals = np.linspace(0, 10, 100) plt.plot(xvals, b0 + b1*xvals + b2*(xvals**2)) """ Explanation: We are not restricted to a straight-line regression model; we can represent a curved relationship between our variables by introducing polynomial terms. 
For example, a cubic model: <div style="font-size: 150%;"> $y_i = \beta_0 + \beta_1 x_i + \beta_2 x_i^2 + \epsilon_i$ </div> End of explanation """ sum_squares_cubic = lambda theta, x, y: np.sum((y - theta[0] - theta[1]*x - theta[2]*(x**2) - theta[3]*(x**3)) ** 2) bb = pd.read_csv("../data/baseball.csv", index_col=0) plt.plot(bb.hr, bb.rbi, 'r.') b0,b1,b2,b3 = fmin(sum_squares_cubic, [0,1,-1,0], args=(bb.hr, bb.rbi)) xvals = np.arange(40) plt.plot(xvals, b0 + b1*xvals + b2*(xvals**2) + b3*(xvals**3)) """ Explanation: Although polynomial model characterizes a nonlinear relationship, it is a linear problem in terms of estimation. That is, the regression model $f(y | x)$ is linear in the parameters. For some data, it may be reasonable to consider polynomials of order>2. For example, consider the relationship between the number of home runs a baseball player hits and the number of runs batted in (RBI) they accumulate; clearly, the relationship is positive, but we may not expect a linear relationship. End of explanation """ # Write your answer here """ Explanation: Exercise: Polynomial function Write a function that specifies a polynomial of arbitrary degree. End of explanation """ from sklearn import linear_model straight_line = linear_model.LinearRegression() straight_line.fit(x.reshape(-1, 1), y) straight_line.coef_ plt.plot(x, y, 'ro') plt.plot(x, straight_line.predict(x[:, np.newaxis]), color='blue', linewidth=3) """ Explanation: In practice, we need not fit least squares models by hand because they are implemented generally in packages such as scikit-learn and statsmodels. For example, scikit-learn package implements least squares models in its LinearRegression class: End of explanation """ from patsy import dmatrix X = dmatrix('x + I(x**2)') np.asarray(X) """ Explanation: For more general regression model building, its helpful to use a tool for describing statistical models, called patsy. 
With patsy, it is easy to specify the desired combinations of variables for any particular analysis, using an "R-like" syntax. patsy parses the formula string, and uses it to construct the approriate design matrix for the model. For example, the quadratic model specified by hand above can be coded as: End of explanation """ poly_line = linear_model.LinearRegression(fit_intercept=False) poly_line.fit(X, y) poly_line.coef_ plt.plot(x, y, 'ro') plt.plot(x, poly_line.predict(X), color='blue', linewidth=3) """ Explanation: The dmatrix function returns the design matrix, which can be passed directly to the LinearRegression fitting method. End of explanation """ def calc_poly(params, data): x = np.c_[[data**i for i in range(len(params))]] return np.dot(params, x) sum_squares_poly = lambda theta, x, y: np.sum((y - calc_poly(theta, x)) ** 2) betas = fmin(sum_squares_poly, np.zeros(10), args=(x,y), maxiter=1e6) plt.plot(x, y, 'ro') xvals = np.linspace(0, max(x), 100) plt.plot(xvals, calc_poly(betas, xvals)) """ Explanation: Model Selection How do we choose among competing models for a given dataset? More parameters are not necessarily better, from the standpoint of model fit. For example, fitting a 9-th order polynomial to the sample data from the above example certainly results in an overfit. End of explanation """ n = len(x) aic = lambda rss, p, n: n * np.log(rss/(n-p-1)) + 2*p RSS1 = sum_of_squares(fmin(sum_of_squares, [0,1], args=(x,y)), x, y) RSS2 = sum_squares_quad(fmin(sum_squares_quad, [1,1,-1], args=(x,y)), x, y) print('\nModel 1: {0}\nModel 2: {1}'.format(aic(RSS1, 2, n), aic(RSS2, 3, n))) """ Explanation: One approach is to use an information-theoretic criterion to select the most appropriate model. For example Akaike's Information Criterion (AIC) balances the fit of the model (in terms of the likelihood) with the number of parameters required to achieve that fit. 
We can easily calculate AIC as: $$AIC = n \log(\hat{\sigma}^2) + 2p$$ where $p$ is the number of parameters in the model and $\hat{\sigma}^2 = RSS/(n-p-1)$. Notice that as the number of parameters increase, the residual sum of squares goes down, but the second term (a penalty) increases. To apply AIC to model selection, we choose the model that has the lowest AIC value. End of explanation """ titanic = pd.read_excel("../data/titanic.xls", "titanic") titanic.name jitter = np.random.normal(scale=0.02, size=len(titanic)) plt.scatter(np.log(titanic.fare), titanic.survived + jitter, alpha=0.3) plt.yticks([0,1]) plt.ylabel("survived") plt.xlabel("log(fare)") """ Explanation: Hence, on the basis of "information distance", we would select the 2-parameter (linear) model. Logistic Regression Fitting a line to the relationship between two variables using the least squares approach is sensible when the variable we are trying to predict is continuous, but what about when the data are dichotomous? male/female pass/fail died/survived Let's consider the problem of predicting survival in the Titanic disaster, based on our available information. For example, lets say that we want to predict survival as a function of the fare paid for the journey. End of explanation """ x = np.log(titanic.fare[titanic.fare>0]) y = titanic.survived[titanic.fare>0] betas_titanic = fmin(sum_of_squares, [1,1], args=(x,y)) jitter = np.random.normal(scale=0.02, size=len(titanic)) plt.scatter(np.log(titanic.fare), titanic.survived + jitter, alpha=0.3) plt.yticks([0,1]) plt.ylabel("survived") plt.xlabel("log(fare)") plt.plot([0,7], [betas_titanic[0], betas_titanic[0] + betas_titanic[1]*7.]) """ Explanation: I have added random jitter on the y-axis to help visualize the density of the points, and have plotted fare on the log scale. Clearly, fitting a line through this data makes little sense, for several reasons. 
First, for most values of the predictor variable, the line would predict values that are not zero or one. Second, it would seem odd to choose least squares (or similar) as a criterion for selecting the best line. End of explanation """ logit = lambda p: np.log(p/(1.-p)) unit_interval = np.linspace(0,1) plt.plot(unit_interval/(1-unit_interval), unit_interval) """ Explanation: If we look at this data, we can see that for most values of fare, there are some individuals that survived and some that did not. However, notice that the cloud of points is denser on the "survived" (y=1) side for larger values of fare than on the "died" (y=0) side. Stochastic model Rather than model the binary outcome explicitly, it makes sense instead to model the probability of death or survival in a stochastic model. Probabilities are measured on a continuous [0,1] scale, which may be more amenable for prediction using a regression line. We need to consider a different probability model for this exerciese however; let's consider the Bernoulli distribution as a generative model for our data: <div style="font-size: 120%;"> $$f(y|p) = p^{y} (1-p)^{1-y}$$ </div> where $y = {0,1}$ and $p \in [0,1]$. So, this model predicts whether $y$ is zero or one as a function of the probability $p$. Notice that when $y=1$, the $1-p$ term disappears, and when $y=0$, the $p$ term disappears. So, the model we want to fit should look something like this: <div style="font-size: 120%;"> $$p_i = \beta_0 + \beta_1 x_i + \epsilon_i$$ </div> However, since $p$ is constrained to be between zero and one, it is easy to see where a linear (or polynomial) model might predict values outside of this range. We can modify this model sligtly by using a link function to transform the probability to have an unbounded range on a new scale. 
Specifically, we can use a logit transformation as our link function: <div style="font-size: 120%;"> $$\text{logit}(p) = \log\left[\frac{p}{1-p}\right] = x$$ </div> Here's a plot of $p/(1-p)$ End of explanation """ plt.plot(logit(unit_interval), unit_interval) """ Explanation: And here's the logit function: End of explanation """ invlogit = lambda x: 1. / (1 + np.exp(-x)) """ Explanation: The inverse of the logit transformation is: <div style="font-size: 150%;"> $$p = \frac{1}{1 + \exp(-x)}$$ </div> End of explanation """ def logistic_like(theta, x, y): p = invlogit(theta[0] + theta[1] * x) # Return negative of log-likelihood return -np.sum(y * np.log(p) + (1-y) * np.log(1 - p)) """ Explanation: So, now our model is: <div style="font-size: 120%;"> $$\text{logit}(p_i) = \beta_0 + \beta_1 x_i + \epsilon_i$$ </div> We can fit this model using maximum likelihood. Our likelihood, again based on the Bernoulli model is: <div style="font-size: 120%;"> $$L(y|p) = \prod_{i=1}^n p_i^{y_i} (1-p_i)^{1-y_i}$$ </div> which, on the log scale is: <div style="font-size: 120%;"> $$l(y|p) = \sum_{i=1}^n y_i \log(p_i) + (1-y_i)\log(1-p_i)$$ </div> We can easily implement this in Python, keeping in mind that fmin minimizes, rather than maximizes functions: End of explanation """ x, y = titanic[titanic.fare.notnull()][['fare', 'survived']].values.T """ Explanation: Remove null values from variables End of explanation """ b0, b1 = fmin(logistic_like, [0.5,0], args=(x,y)) b0, b1 jitter = np.random.normal(scale=0.01, size=len(x)) plt.plot(x, y+jitter, 'r.', alpha=0.3) plt.yticks([0,.25,.5,.75,1]) xvals = np.linspace(0, 600) plt.plot(xvals, invlogit(b0+b1*xvals)) """ Explanation: ... and fit the model. End of explanation """ logistic = linear_model.LogisticRegression() logistic.fit(x[:, np.newaxis], y) logistic.coef_ """ Explanation: As with our least squares model, we can easily fit logistic regression models in scikit-learn, in this case using the LogisticRegression. 
End of explanation """ # Write your answer here """ Explanation: Exercise: multivariate logistic regression Which other variables might be relevant for predicting the probability of surviving the Titanic? Generalize the model likelihood to include 2 or 3 other covariates from the dataset. End of explanation """
sgagnon/moore
notebooks/ObjFam Iterated Estimation of fMRI Data (LS-S).ipynb
bsd-3-clause
import pandas as pd import json from scipy import stats, signal, linalg from sklearn.decomposition import PCA import nibabel as nib import nipype from nipype import Node, SelectFiles, DataSink, IdentityInterface import matplotlib as mpl import matplotlib.pyplot as plt mpl.use("Agg") from nipype.interfaces import fsl from nipy.modalities.fmri.glm import FMRILinearModel from nibabel import Nifti1Image, save import numpy as np import os import os.path as op import shutil import sys import copy import lyman import moss from lyman import tools from lyman import default_experiment_parameters import lyman.workflows as wf from moss import glm import seaborn as sns %matplotlib inline sns.set(context="notebook", style="ticks", font="Arial") pd.set_option('display.precision', 3) """ Explanation: Iterated Estimation of fMRI Data Deconvolution within the Lyman framework, based on Mumford et al. (2012) LS-S method. For each subject and run, iteratively fit models to estimate the $\beta$/$t$-statistic for each condition of interest, while including all the other trials in a single regression. Extract the appropriate values for all voxels within a given mask. This approach uses a combination of OLS and AR1 procedures during GLM fitting. This step assumes that the timeseries has been preprocessed, and all runs have been linearly transformed into the first run's space. Import necessary packages End of explanation """ def plotSimilarityStruct(run_data, run_evs): from scipy.cluster.hierarchy import linkage, dendrogram from scipy.spatial.distance import pdist, squareform data_dist = pdist(run_data.T, 'correlation') data_link = linkage(data_dist) # Compute and plot first dendrogram. fig = plt.figure(figsize=(8,8)) # x ywidth height ax1 = fig.add_axes([0.05,0.1,0.2,0.6]) Y = linkage(data_dist, method='single') Z1 = dendrogram(Y, orientation='right',labels=run_evs, distance_sort=True) # adding/removing the axes ax1.set_xticks([]) # Compute and plot second dendrogram. 
ax2 = fig.add_axes([0.3,0.71,0.6,0.2]) Z2 = dendrogram(Y) ax2.set_xticks([]) ax2.set_yticks([]) #Compute and plot the heatmap axmatrix = fig.add_axes([0.37,0.1,0.6,0.6]) idx1 = Z1['leaves'] idx2 = Z2['leaves'] D = squareform(data_dist) D = D[idx1,:] D = D[:,idx2] im = axmatrix.matshow(D, aspect='auto', origin='lower', cmap=plt.cm.YlGnBu) axmatrix.set_xticks([]) axmatrix.set_yticks([]) # Plot colorbar. axcolor = fig.add_axes([1,0.1,0.02,0.6]) plt.colorbar(im, cax=axcolor) def removeSegment(run_evs, sep, remove_seg): run_evs = ["-".join(x.split('-')[0:remove_seg]) for x in list(run_evs)] return run_evs def transform_fisherZ(r): z = 0.5*np.log((1+r)/(1-r)) return z """ Explanation: Define helper functions End of explanation """ experiment = 'objfam' altmodel = 'trial-prototype' nruns = 12 subject_list = '/Volumes/group/awagner/sgagnon/ObjFam/data/subids_subset_no23or19.txt' unsmoothed = True condition_labels = True # If condition_file to specify which condition the trials belong to condition_filename = 'trial-prototype-condition.csv' # only necessary if condition_labels = True project = lyman.gather_project_info() exp = lyman.gather_experiment_info(experiment, altmodel) group = np.loadtxt(subject_list, str).tolist() exp_base = experiment exp_name = "-".join([exp_base, altmodel]) data_dir = project["data_dir"] analysis_dir = project["analysis_dir"] smoothing = "unsmoothed" if unsmoothed else "smoothed" data_dir """ Explanation: Project specific parameters End of explanation """ mask_name = 'lateraloccipital' out_val = 't' # t or beta sub_mat = [] group_evs = [] for subid in group: print subid design_file = op.join(data_dir, subid, "design", exp["design_name"] + ".csv") sub_dmat = pd.read_csv(design_file) if condition_labels: condition_file = op.join(data_dir, subid, "design", condition_filename) cond_map = pd.read_csv(condition_file) # get 3D mask as bool # mask_file = op.join(timeseries_dir, "functional_mask_xfm.nii.gz") mask_file = op.join(data_dir, subid, 'masks', 
mask_name + '.nii.gz') mask = nib.load(mask_file).get_data() == 1 run_mat = [] ev_list = [] for run in range(1, nruns+1): print 'Run: ' + str(run) # Setup run specific directories # preproc timeseries registered to first run timeseries_dir = op.join(analysis_dir, experiment, subid, "reg/epi/unsmoothed/run_" + str(run)) preproc_dir = op.join(analysis_dir, experiment, subid, "preproc/run_" + str(run)) realign_file = op.join(preproc_dir, "realignment_params.csv") artifact_file = op.join(preproc_dir, "artifacts.csv") timeseries_file = op.join(timeseries_dir, "timeseries_xfm.nii.gz") # Build the model design run_dmat = sub_dmat[sub_dmat.run == run] realign = pd.read_csv(realign_file) realign = realign.filter(regex="rot|trans").apply(stats.zscore) artifacts = pd.read_csv(artifact_file).max(axis=1) ntp = len(artifacts) tr = exp["TR"] hrf = getattr(glm, exp["hrf_model"]) hrf = hrf(exp["temporal_deriv"], tr, **exp["hrf_params"]) ev_mat = [] for ev in run_dmat.condition.unique(): ev_list.append(ev) design_LSS = copy.deepcopy(run_dmat) design_LSS.condition[design_LSS.condition != ev] = 'other' design_kwargs = dict(confounds=realign, artifacts=artifacts, tr=tr, condition_names=sorted(design_LSS.condition.unique()), # sort to keep condition of interest first confound_pca=exp["confound_pca"], hpf_cutoff=exp["hpf_cutoff"]) X = glm.DesignMatrix(design_LSS, hrf, ntp, **design_kwargs) # print ev # print X.design_matrix.columns # Fit model fmri_glm = FMRILinearModel(timeseries_file, np.array(X.design_matrix), mask=mask_file) fmri_glm.fit(do_scaling=True, model='ar1') # Get beta beta_hat = fmri_glm.glms[0].get_beta() # Output appropriate statistic if out_val == 'beta': ev_mat.append(beta_hat[0]) elif out_val == 't': # Calc t-statistic num_reg = beta_hat.shape[0] con = [[1] + [0]*(num_reg-1)] t_map, = fmri_glm.contrast(con, con_id=ev, contrast_type='t') t_map = t_map.get_data()[mask].ravel() ev_mat.append(t_map) run_mat.append(ev_mat) sub_mat.append(run_mat) group_evs.append(ev_list) 
data = np.array(sub_mat) evs = np.array(group_evs) print 'Data shape (subid x run x trial x voxel):' + str(data.shape) print 'EVs shape (subid x trial):' + str(evs.shape) data[5,0] group[5] sub_num = 8 run_data = data[sub_num,0] run_data = np.vstack(run_data).T # voxel x ev run_evs = evs[sub_num].reshape(12,30)[0] sns.corrplot(run_data[np.argsort(run_evs)], names = run_evs[np.argsort(run_evs)], diag_names=False, annot=False, cmap_range=(-1,1)) df = pd.DataFrame(run_data, columns=run_evs) corr_mat = df.corr() sns.clustermap(corr_mat) """ Explanation: Iterate through subjects and runs End of explanation """ data.shape[0] sub_data.shape[0] df_corr = pd.DataFrame(columns=['subid', 'run', 'condition', 'corr']) df_condmap = pd.DataFrame() for sub_num in range(data.shape[0]): print group[sub_num] subid = group[sub_num] sub_data = data[sub_num] sub_evs = evs[sub_num].reshape(12,30) condition_file = op.join(data_dir, subid, "design", condition_filename) cond_map = pd.read_csv(condition_file) cond_map['subid'] = subid df_condmap = pd.concat([df_condmap, cond_map]) for run in range(sub_data.shape[0]): run_data = sub_data[run] run_data = np.vstack(run_data).T # voxel x ev run_evs = sub_evs[run] run_conds = removeSegment(run_evs, '-', 2) df = pd.DataFrame(run_data, columns=run_conds) for cond in set(run_conds): corr_value = np.array(df[cond].corr())[0][1] df_corr = df_corr.append([dict(subid=subid, run=run+1, condition=cond, corr=corr_value)], ignore_index=True) df_corr.head() df_corr.subid.unique().shape df_condmap.subid.unique().shape df_corr2 = df_corr.merge(df_condmap, on=['subid', 'run', 'condition']) df_corr2.head() df_corr2.subid.unique().shape sns.distplot(data.ix[1500:3000]['corr']) sns.set(context='poster', style='whitegrid') data_corr = df_corr2.join(pd.DataFrame(df_corr2.morphmem.str.split('_').tolist(), columns=['morph', 'resp'])) sns.factorplot(x='morph', y='corr', hue='resp',dodge=0.1, hue_order=['new', 'old'], ci=68, units='subid', data=data_corr) %load_ext 
rpy2.ipython %R require(lme4) %R require(lmerTest) %R require(ggplot2) %R -i data_corr %%R print(str(data_corr)) data_corr$morph_q = as.numeric(data_corr$morph) data_corr_noguess = data_corr[data_corr$resp != 'guess',] data_corr_noguess$resp = factor(data_corr_noguess$resp) print(str(data_corr_noguess)) contrasts(data_corr_noguess$resp) = c(-1,1) print(contrasts(data_corr_noguess$resp)) %%R res1 = lmer(corr ~ morph_q * resp + (1|subid), data=data_corr_noguess) print(summary(res1)) data_corr['morph_q'] = data_corr.morph.astype('int') data_group = data.groupby(['subid', 'morph']).mean().reset_index() data_group.head() data_group['z'] = transform_fisherZ(data_group['corr']) data_group.head() sns.violinplot(x='morph', y='z', data=data_group, inner="points") sns.coefplot("z~scale(morph_q)", data=data_group, ci=68) """ Explanation: Compute 2-way correlations End of explanation """
newsapps/public-notebooks
Weekend shootings and homicides.ipynb
mit
import os import requests # Some constants NEWSROOMDB_URL = os.environ['NEWSROOMDB_URL'] # Utilities for loading data from NewsroomDB def get_table_url(table_name, base_url=NEWSROOMDB_URL): return '{}table/json/{}'.format(base_url, table_name) def get_table_data(table_name): url = get_table_url(table_name) try: r = requests.get(url) return r.json() except: print("Request failed. Probably because the response is huge. We should fix this.") return get_table_data(table_name) shooting_victims_raw = get_table_data('shootings') print("Loaded {} shooting victims".format(len(shooting_victims_raw))) import agate from datetime import datetime, timedelta # Load raw data into an Agate table # Agate tries to parse the date and time automatically. It parses the time incorrectly # as MM:SS instead of HH:MM. We ultimately need a timestamp, which is easily # parsed by concatenating the date and time, so disable the initial # auto-parsing of these fields. column_types = { 'Date': agate.Text(), 'Time': agate.Text(), } shooting_victims = agate.Table.from_object(shooting_victims_raw, column_types=column_types) # Calculate a timestamp from the Date and Time columns def get_timestamp(row, date_col='Date', time_col='Time'): if not row[date_col] or not row[time_col]: return None try: timestamp = datetime.strptime("{} {}".format(row[date_col], row[time_col]), "%Y-%m-%d %H:%M") except ValueError: timestamp = datetime.strptime("{} {}".format(row[date_col], row[time_col]), "%Y-%m-%d %H:%M:%S") # HACK: There are some bad dates in the data. Based on visual inspection, # we can fix the dates using a couple of rules year = timestamp.year if year < 20: year += 2000 new_timestamp = timestamp.replace(year=year) print("Bad year date in row with id {}. Changing {} to {}.".format( row['_id'], timestamp.strftime("%Y-%m-%d"), new_timestamp.strftime("%Y-%m-%d"))) timestamp = new_timestamp elif year == 216: new_timestamp = timestamp.replace(year=2016) print("Bad year date in row with id {}. 
Changing {} to {}.".format( row['_id'], timestamp.strftime("%Y-%m-%d"), new_timestamp.strftime("%Y-%m-%d"))) timestamp = new_timestamp return timestamp shooting_victims = shooting_victims.compute([ ('timestamp', agate.Formula(agate.DateTime(), get_timestamp)) ]) shooting_victims = shooting_victims.where(lambda row: row['timestamp'] is not None) """ Explanation: Weekend shootings and homicides? How many people were shot each weekend? How many people were killed in a homicide? Which weekends had the most of both types of violence? For this analysis, we define a weekend as starting on Friday at 3 p.m. and ending on Monday at 6 a.m. Load data from Newsroom DB End of explanation """ def is_weekend(timestamp): """Does the timestamp fall between Friday 3 p.m. and Monday 6 a.m.""" if not timestamp: return False day_of_week = timestamp.weekday() if day_of_week > 0 and day_of_week < 4: return False if day_of_week == 4: # Friday # Same day, 3 p.m. start = datetime(timestamp.year, timestamp.month, timestamp.day, 15) return timestamp >= start if day_of_week == 0: # Monday # Same day, 6 a.m. end = datetime(timestamp.year, timestamp.month, timestamp.day, 6) return timestamp < end return True weekend_shootings = shooting_victims.where(lambda row: is_weekend(row['timestamp'])) print("There are {0} weekend shooting victims".format(len(weekend_shootings.rows))) from datetime import datetime import time # Utility functions for calculating weekend start and end dates/times for a given def clone_datetime(d): """Make a copy of a datetime object""" # HACK: Is there a better way to do this? Why isn't there an obvious clone method? 
return datetime.fromtimestamp(time.mktime(d.timetuple())) # The following methods only work for timestamps that fall within a weekend def weekend_start(timestamp): days_from_friday = timestamp.weekday() - 4 if days_from_friday < 0: days_from_friday += 1 days_from_friday *= -1 friday_delta = timedelta(days=(-1 * days_from_friday)) start = clone_datetime(timestamp) start += friday_delta start = start.replace(hour=15, minute=0, second=0) return start def weekend_end(timestamp): days_to_monday = 0 - timestamp.weekday() if days_to_monday < 0: days_to_monday += 7 monday_delta = timedelta(days=days_to_monday) end = clone_datetime(timestamp) end += monday_delta end = end.replace(hour=6, minute=0, second=0) return end def get_weekend_start(row): return weekend_start(row['timestamp']).date() # Add weekend start and end dates to each row so we can # group by on them later. Cecilia took a different approach, # calculating the weekends first and iterating through them # and finding matching shootings for each weekend. 
weekend_shootings_with_start_end = weekend_shootings.compute([ ('weekend_start', agate.Formula(agate.Date(), get_weekend_start)), ('weekend_end', agate.Formula(agate.Date(), lambda row: weekend_end(row['timestamp']).date())) ]) # Aggregate the shooting victims by weekend shooting_victims_by_weekend = weekend_shootings_with_start_end.group_by( lambda row: row['weekend_start'].strftime("%Y-%m-%d") + " to " + row['weekend_end'].strftime("%Y-%m-%d")) shooting_victims_weekend_counts = shooting_victims_by_weekend.aggregate([ ('count', agate.Count()) ]) shooting_victims_weekend_counts.order_by('count', reverse=True).print_table(max_column_width=40, max_rows=None) """ Explanation: Filter to only weekend shootings End of explanation """ homicides_raw = get_table_data('homicides') homicide_column_types = { 'Occ Date': agate.Text(), 'Occ Time': agate.Text(), } homicides = agate.Table.from_object(homicides_raw, column_types=homicide_column_types) homicides = homicides.compute([ ('timestamp', agate.Formula(agate.DateTime(), lambda row: get_timestamp(row, date_col='Occ Date', time_col='Occ Time'))) ]) weekend_homicides = homicides.where(lambda row: is_weekend(row['timestamp'])) weekend_homicides_with_start_end = weekend_homicides.compute([ ('weekend_start', agate.Formula(agate.Date(), get_weekend_start)), ('weekend_end', agate.Formula(agate.Date(), lambda row: weekend_end(row['timestamp']).date())) ]) homicides_by_weekend = weekend_homicides_with_start_end.group_by( lambda row: row['weekend_start'].strftime("%Y-%m-%d") + " to " + row['weekend_end'].strftime("%Y-%m-%d")) weekend_homicide_counts = homicides_by_weekend.aggregate([ ('count', agate.Count()) ]) weekend_homicide_counts.order_by('count', reverse=True).print_table(max_column_width=40, max_rows=None) """ Explanation: Do the same thing for Homicides End of explanation """ import re # First off, we need to avoid double-counting homicides and shootings def is_homicide(row): if not row['UCR']: return False if 
re.match(r'0{0,1}110', row['UCR']): return True return False non_homicide_weekend_shootings = weekend_shootings_with_start_end.where(lambda row: not is_homicide(row)) print("There are {0} non-homicide weekend shootings".format(len(non_homicide_weekend_shootings.rows))) non_homicide_shooting_victims_by_weekend = non_homicide_weekend_shootings.group_by( lambda row: row['weekend_start'].strftime("%Y-%m-%d") + " to " + row['weekend_end'].strftime("%Y-%m-%d")) non_homicide_shooting_victims_weekend_counts = non_homicide_shooting_victims_by_weekend.aggregate([ ('count', agate.Count()) ]) def none_to_zero(x): if x is None: return 0 return x shooting_victims_and_homicides = non_homicide_shooting_victims_weekend_counts.join(weekend_homicide_counts, 'group') shooting_victims_and_homicides = shooting_victims_and_homicides.compute([ ('total', agate.Formula(agate.Number(), lambda row: row['count'] + none_to_zero(row['count2']))), ]) shooting_victims_and_homicides.order_by('total', reverse=True).print_table(max_column_width=40, max_rows=None) """ Explanation: What about both? End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/cnrm-cerfacs/cmip6/models/cnrm-cm6-1/aerosol.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'cnrm-cerfacs', 'cnrm-cm6-1', 'aerosol') """ Explanation: ES-DOC CMIP6 Model Properties - Aerosol MIP Era: CMIP6 Institute: CNRM-CERFACS Source ID: CNRM-CM6-1 Topic: Aerosol Sub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. Properties: 69 (37 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:53:52 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Software Properties 3. Key Properties --&gt; Timestep Framework 4. Key Properties --&gt; Meteorological Forcings 5. Key Properties --&gt; Resolution 6. Key Properties --&gt; Tuning Applied 7. Transport 8. Emissions 9. Concentrations 10. Optical Radiative Properties 11. Optical Radiative Properties --&gt; Absorption 12. Optical Radiative Properties --&gt; Mixtures 13. Optical Radiative Properties --&gt; Impact Of H2o 14. Optical Radiative Properties --&gt; Radiative Scheme 15. Optical Radiative Properties --&gt; Cloud Interactions 16. Model 1. 
Key Properties Key properties of the aerosol model 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of aerosol model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of aerosol model code End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.scheme_scope') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "troposhere" # "stratosphere" # "mesosphere" # "mesosphere" # "whole atmosphere" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Scheme Scope Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Atmospheric domains covered by the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.basic_approximations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Basic approximations made in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "3D mass/volume ratio for aerosols" # "3D number concenttration for aerosols" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. 
Prognostic Variables Form Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Prognostic variables in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 1.6. Number Of Tracers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of tracers in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.family_approach') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.7. Family Approach Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are aerosol calculations generalized into families of species? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Software Properties Software properties of aerosol code 2.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses atmospheric chemistry time stepping" # "Specific timestepping (operator splitting)" # "Specific timestepping (integrated)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Timestep Framework Physical properties of seawater in ocean 3.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Mathematical method deployed to solve the time evolution of the prognostic variables End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Split Operator Advection Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for aerosol advection (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.3. Split Operator Physical Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for aerosol physics (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.4. Integrated Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep for the aerosol model (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Implicit" # "Semi-implicit" # "Semi-analytic" # "Impact solver" # "Back Euler" # "Newton Raphson" # "Rosenbrock" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3.5. Integrated Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify the type of timestep scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Meteorological Forcings ** 4.1. Variables 3D Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Three dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Variables 2D Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Two dimensionsal forcing variables, e.g. land-sea mask definition End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. Frequency Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Frequency with which meteological forcings are applied (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Resolution Resolution in the aersosol model grid 5.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Canonical Horizontal Resolution Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 5.3. Number Of Horizontal Gridpoints Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 5.4. Number Of Vertical Levels Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.5. Is Adaptive Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Default is False. Set true if grid resolution changes during execution. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Tuning Applied Tuning methodology for aerosol model 6.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. 
Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics of mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Transport Aerosol transport 7.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of transport in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Specific transport scheme (eulerian)" # "Specific transport scheme (semi-lagrangian)" # "Specific transport scheme (eulerian and semi-lagrangian)" # "Specific transport scheme (lagrangian)" # TODO - please enter value(s) """ Explanation: 7.2. 
Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method for aerosol transport modeling End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Mass adjustment" # "Concentrations positivity" # "Gradients monotonicity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.3. Mass Conservation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method used to ensure mass conservation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.convention') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Convective fluxes connected to tracers" # "Vertical velocities connected to tracers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.4. Convention Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Transport by convention End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Emissions Atmospheric aerosol emissions 8.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of emissions in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Prescribed (climatology)" # "Prescribed CMIP6" # "Prescribed above surface" # "Interactive" # "Interactive above surface" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method used to define aerosol species (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Vegetation" # "Volcanos" # "Bare ground" # "Sea surface" # "Lightning" # "Fires" # "Aircraft" # "Anthropogenic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.3. Sources Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Sources of the aerosol species are taken into account in the emissions scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Interannual" # "Annual" # "Monthly" # "Daily" # TODO - please enter value(s) """ Explanation: 8.4. Prescribed Climatology Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify the climatology type for aerosol emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. 
Prescribed Climatology Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and prescribed via a climatology End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.6. Prescribed Spatially Uniform Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.7. Interactive Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.8. Other Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and specified via an &quot;other method&quot; End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.9. Other Method Characteristics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Characteristics of the &quot;other method&quot; used for aerosol emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.concentrations.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Concentrations Atmospheric aerosol concentrations 9.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of concentrations in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.2. Prescribed Lower Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the lower boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.3. Prescribed Upper Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the upper boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.4. Prescribed Fields Mmr Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed as mass mixing ratios. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.5. Prescribed Fields Mmr Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed as AOD plus CCNs. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10. Optical Radiative Properties Aerosol optical and radiative properties 10.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of optical and radiative properties End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11. Optical Radiative Properties --&gt; Absorption Absortion properties in aerosol scheme 11.1. Black Carbon Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.2. Dust Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of dust at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.3. Organics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of organics at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12. Optical Radiative Properties --&gt; Mixtures ** 12.1. External Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there external mixing with respect to chemical composition? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12.2. Internal Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there internal mixing with respect to chemical composition? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.3. Mixing Rule Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If there is internal mixing with respect to chemical composition then indicate the mixinrg rule End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13. Optical Radiative Properties --&gt; Impact Of H2o ** 13.1. Size Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does H2O impact size? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.2. Internal Mixture Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does H2O impact internal mixture? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14. Optical Radiative Properties --&gt; Radiative Scheme Radiative scheme for aerosol 14.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of radiative scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.2. Shortwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of shortwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.3. Longwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of longwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Optical Radiative Properties --&gt; Cloud Interactions Aerosol-cloud interactions 15.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of aerosol-cloud interactions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.2. Twomey Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the Twomey effect included? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.3. Twomey Minimum Ccn Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If the Twomey effect is included, then what is the minimum CCN number? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.4. Drizzle Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the scheme affect drizzle? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.5. Cloud Lifetime Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the scheme affect cloud lifetime? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.6. Longwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of longwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Model Aerosol model 16.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Dry deposition" # "Sedimentation" # "Wet deposition (impaction scavenging)" # "Wet deposition (nucleation scavenging)" # "Coagulation" # "Oxidation (gas phase)" # "Oxidation (in cloud)" # "Condensation" # "Ageing" # "Advection (horizontal)" # "Advection (vertical)" # "Heterogeneous chemistry" # "Nucleation" # TODO - please enter value(s) """ Explanation: 16.2. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Processes included in the Aerosol model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Radiation" # "Land surface" # "Heterogeneous chemistry" # "Clouds" # "Ocean" # "Cryosphere" # "Gas phase chemistry" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.3. Coupling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Other model components coupled to the Aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "DMS" # "SO2" # "Ammonia" # "Iodine" # "Terpene" # "Isoprene" # "VOC" # "NOx" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.4. Gas Phase Precursors Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of gas phase aerosol precursors. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Bulk" # "Modal" # "Bin" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.5. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Type(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.bulk_scheme_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Nitrate" # "Sea salt" # "Dust" # "Ice" # "Organic" # "Black carbon / soot" # "SOA (secondary organic aerosols)" # "POM (particulate organic matter)" # "Polar stratospheric ice" # "NAT (Nitric acid trihydrate)" # "NAD (Nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particule)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.6. Bulk Scheme Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of species covered by the bulk scheme. End of explanation """
4dsolutions/Python5
PySummary1.ipynb
mit
lessons = { "1": "Python is part of a bigger ecosystem (example: Jupyter Notebooks).", "2": "Batteries Included refers to the well-stocked standard library.", "3": "Built-ins inside __builtins__ include the basic types such as...", "4": "__ribs__ == special names == magic methods (but not all are methods).", "5": "3rd Party Python is where a lot of the action is!", "6": "'Python fits your brain' means it gets out of your way once you learn it." } important_types = [{'Numeric': ["int", "float", "Decimal", "Fraction", "complex"], 'Collections': [{"Sequences": ["list", "range", "tuple"], "Mappings": ['dict', 'set']}], 'Descriptors': ['property']}, {'Other types': ['function', 'class', 'generator']}] for key, value in lessons.items(): # dict method to return all key:value pairs print("{}.: {}".format(key, value), file=None) # this could be HTML to a file if key == "3": print() for the_type in important_types[0]['Numeric']: print(the_type) for the_type in important_types[0]['Collections'][0]['Sequences']: print(the_type) for the_type in important_types[0]['Collections'][0]['Mappings']: print(the_type) print() """ Explanation: SAISOFT PYT-PR: Session 10 What Have We Learned? End of explanation """ import random class BatteryDead(Exception): pass class IgnitionKeyBroken(Exception): pass class Car: def start(self): as_luck_would_have_it = random.randint(0,10) if as_luck_would_have_it == 10: raise BatteryDead elif as_luck_would_have_it == 0: raise IgnitionKeyBroken print("Car starts!") try: # might not work my_car = Car() my_car.start() except BatteryDead: print("Oops, need to charge battery") except IgnitionKeyBroken: print("Oops, your key just snapped") """ Explanation: Continue to "doodle and daydream" as you find the time. Think of ways to describe your day as a Python program. Remember the story of The Car that Would Not Start. Run this a few times to see the different possible workflows. 
End of explanation """ from functools import wraps def decorator(f): @wraps(f) def proxy(x): # proxy print("Look at me!") return f(x) return proxy @decorator def Sqr(x): """Square Dancer""" return x * x Sqr(10) """ Explanation: We also learned about decorator syntax. Using a decorator, we're able to use a callable as an input to an object that provides a proxy output, likewise callable by the same name. End of explanation """ help(Sqr) """ Explanation: @wraps forwards the __doctstring__ and __name__ of the incoming f argument to the proxy being wrapped. LAB: Try commenting out the line with @wraps on it and checking the __doctstring__ and __name__ of Sqr again. Comment out @wraps re-run the cell containing the two function definitions re-run the cell below, and not changes change it back End of explanation """
planetlabs/notebooks
jupyter-notebooks/pixels-to-tabular-data/field_statistical_analysis.ipynb
apache-2.0
import datetime import json import os from pathlib import Path from pprint import pprint import shutil import time from zipfile import ZipFile import matplotlib.pyplot as plt import numpy as np import pandas as pd from planet import api from planet.api import downloader, filters import pyproj from rasterio import plot from rasterio.mask import raster_geometry_mask from shapely.geometry import shape, MultiPolygon from shapely.ops import transform """ Explanation: Pixels to Tabular Data Agricultural Statistical Analysis Use Case Talk about pixels and tabular data. The use case addressed in this tutorial is: As an agriculture customer, I'd like to create an imagery pipeline that provides for trialing different fungicides by ordering Planet imagery within a single field (AOI), cutting the imagery into multiple field blocks, filtering based on cloud coverage within the blocks, and comparing values across blocks in two ways. First, comparison is performed by extracting median, mean, variance NDVI values for each day (using random point sampling) in each block. Second, comparison is performed by random point selection in each block. Introduction Two things are interesting about this use case. First, we are gridding the AOI into blocks. Second, we are performing some calculations with the output to compare results across different blocks in the field. Implementation For this use case, the area of interest is specified (the field) but the time range is not. For time-series analysis the daily coverage of PS satellites is ideal. Because we are only looking at a field, we want to clip the images to the field area of interest to avoid unnecessary pixel wrangling. Also, we don't need all the bands, we are only interested in NDVI. We can use the Orders API to help us clip the images and calculate NDVI. 
Finally, we will need to implement a bit of functionality to filter to images that have no unusable pixels within the area of interest and for the comparisons across the field blocks. To summarize, these are the major steps: 1. Part 1: Setup 1. Part 2: Get Field NDVI 1. Part 3: Sample Field Blocks Part 1: Setup In this section, we set up the notebook and define the field and field block geometries. Import Dependencies End of explanation """ def load_geojson(filename): with open(filename, 'r') as f: return json.load(f) # this feature comes from within the sacramento_crops aoi # it is the first feature in 'ground-truth-test.geojson', which # was prepared in crop-classification/datasets-prepare.ipynb field_filename = os.path.join('pre-data', 'field.geojson') field = load_geojson(field_filename) pprint(field) # visualize field and determine size in acres print('{} acres'.format(field['properties']['ACRES'])) field_aoi = field['geometry'] shape(field_aoi) # visualize field and sample blocks # these blocks were drawn by hand randomly for this demo # they don't actually represent test field blocks blocks = load_geojson(os.path.join('pre-data', 'blocks.geojson')) block_aois = [b['geometry'] for b in blocks] MultiPolygon([shape(a) for a in [field_aoi] + block_aois]) """ Explanation: Get Field and Sample Blocks AOIs End of explanation """ # if your Planet API Key is not set as an environment variable, you can paste it below API_KEY = os.environ.get('PL_API_KEY', 'PASTE_YOUR_KEY_HERE') client = api.ClientV1(api_key=API_KEY) # create an api request from the search specifications # relax the cloud cover requirement as filtering will be done within the aoi def build_request(aoi_geom, start_date, stop_date): '''build a data api search request for clear PSScene imagery''' query = filters.and_filter( filters.geom_filter(aoi_geom), filters.date_range('acquired', gt=start_date), filters.date_range('acquired', lt=stop_date) ) return filters.build_search_request(query, ['PSScene']) def 
search_data_api(request, client, limit=500): result = client.quick_search(request) # this returns a generator return result.items_iter(limit=limit) # define test data for the filter test_start_date = datetime.datetime(year=2019,month=4,day=1) test_stop_date = datetime.datetime(year=2019,month=5,day=1) request = build_request(field_aoi, test_start_date, test_stop_date) print(request) items = list(search_data_api(request, client)) print('{} images match the search criteria.'.format(len(items))) # uncomment to see what an item looks like # pprint(items[0]) """ Explanation: Part 2: Get Field NDVI In this section, we use the Data and Orders APIs to find images that overlap the field AOI in the specified time period and then to download the NDVI values of pixels within the field for all of the images. Once the images are downloaded, we use the UDM2 asset to filter to images that have no unusable pixels within the AOI. Finally, we get to check out what the NDVI of the field looks like! Step 1: Search Data API The goal of this step is to get the scene ids that meet the search criteria for this use case. End of explanation """ footprints = [shape(i['geometry']) for i in items] # make sure all footprints contain the field aoi (that is, no partial overlaps) for f in footprints: assert f.contains(shape(field_aoi)) # visualize aoi and footprint MultiPolygon([shape(field_aoi), footprints[0]]) """ Explanation: Now that we have found the images that match the search criteria, let's make sure all of the images fully contain the field AOI (we don't want to just get half of the field) and then let's see what the image footprint and the AOI look like together. End of explanation """ def get_tools(aoi_geom): # clip to AOI clip_tool = {'clip': {'aoi': aoi_geom}} # convert to NDVI ndvi_tool = {'bandmath': { "pixel_type": "32R", "b1": "(b4 - b3) / (b4+b3)" }} tools = [clip_tool, ndvi_tool] return tools """ Explanation: Whoa look! That AOI is tiny relative to the image footprint. 
We don't want to wrangle all those pixels outside of the AOI. We definately want to clip the imagery footprints to the AOI. Step 2: Submit Order Now that we have the scene ids, we can create the order. The output of this step is a single zip file that contains all of the scenes that meet our criteria. The tools we want to apply are: clip imagery to AOI and convert imagery to NDVI. Step 2.1: Define Toolchain Tools End of explanation """ def build_order(ids, name, aoi_geom): # specify the PSScene 4-Band surface reflectance product # make sure to get the *_udm2 bundle so you get the udm2 product # note: capitalization really matters in item_type when using planet client orders api item_type = 'PSScene' bundle = 'analytic_sr_udm2' orders_request = { 'name': name, 'products': [{ 'item_ids': ids, 'item_type': item_type, 'product_bundle': bundle }], 'tools': get_tools(aoi_geom), 'delivery': { 'single_archive': True, 'archive_filename':'{{name}}_{{order_id}}.zip', 'archive_type':'zip' }, 'notifications': { 'email': False }, } return orders_request # uncomment to see what an order request would look like # pprint(build_order(['id'], 'demo', test_aoi_geom), indent=4) ids = [i['id'] for i in items] name = 'pixels_to_tabular' order_request = build_order(ids, name, field_aoi) """ Explanation: Step 2.2: Build Order Requests End of explanation """ def create_order(order_request, client): orders_info = client.create_order(order_request).get() return orders_info['id'] order_id = create_order(order_request, client) order_id """ Explanation: Step 2.3: Submit Order End of explanation """ def poll_for_success(order_id, client, num_loops=50): count = 0 while(count < num_loops): count += 1 order_info = client.get_individual_order(order_id).get() state = order_info['state'] print(state) success_states = ['success', 'partial'] if state == 'failed': raise Exception(response) elif state in success_states: break time.sleep(10) poll_for_success(order_id, client) """ Explanation: Step 3: 
Download Orders Step 3.1: Wait Until Orders are Successful Before we can download the orders, they have to be prepared on the server. End of explanation """ data_dir = os.path.join('data', 'field_statistical_analysis') # make the download directory if it doesn't exist Path(data_dir).mkdir(parents=True, exist_ok=True) def poll_for_download(dest, endswith, num_loops=50): count = 0 while(count < num_loops): count += 1 matched_files = (f for f in os.listdir(dest) if os.path.isfile(os.path.join(dest, f)) and f.endswith(endswith)) match = next(matched_files, None) if match: match = os.path.join(dest, match) print('downloaded') break else: print('waiting...') time.sleep(10) return match def download_order(order_id, dest, client, limit=None): '''Download an order by given order ID''' # this returns download stats but they aren't accurate or informative # so we will look for the downloaded file on our own. dl = downloader.create(client, order=True) urls = client.get_individual_order(order_id).items_iter(limit=limit) dl.download(urls, [], dest) endswith = '{}.zip'.format(order_id) filename = poll_for_download(dest, endswith) return filename downloaded_file = download_order(order_id, data_dir, client) downloaded_file """ Explanation: Step 3.2: Run Download For this step we will use the planet python orders API because the CLI doesn't do a complete download with large orders. End of explanation """ def unzip(filename, overwrite=False): location = Path(filename) zipdir = location.parent / location.stem if os.path.isdir(zipdir): if overwrite: print('{} exists. 
overwriting.'.format(zipdir)) shutil.rmtree(zipdir) else: raise Exception('{} already exists'.format(zipdir)) with ZipFile(location) as myzip: myzip.extractall(zipdir) return zipdir zipdir = unzip(downloaded_file) zipdir def get_unzipped_files(zipdir): filedir = zipdir / 'files' filenames = os.listdir(filedir) return [filedir / f for f in filenames] file_paths = get_unzipped_files(zipdir) file_paths[0] """ Explanation: Step 4: Unzip Order In this section, we will unzip the order into a directory named after the downloaded zip file. End of explanation """ udm2_files = [f for f in file_paths if 'udm2' in str(f)] # we want to find pixels that are inside the footprint but cloudy # the easiest way to do this is is the udm values (band 8) # https://developers.planet.com/docs/data/udm-2/ # the UDM values are given in # https://assets.planet.com/docs/Combined-Imagery-Product-Spec-Dec-2018.pdf # Bit 0: blackfill (footprint) # Bit 1: cloud covered def read_udm(udm2_filename): with rasterio.open(udm2_filename) as img: # band 8 is the udm band return img.read(8) def get_cloudy_percent(udm_band): blackfill = udm_band == int('1', 2) footprint_count = udm_band.size - np.count_nonzero(blackfill) cloudy = udm_band.size - udm_band == int('10', 2) cloudy_count = np.count_nonzero(cloudy) return (cloudy_count / footprint_count) get_cloudy_percent(read_udm(udm2_files[0])) clear_udm2_files = [f for f in udm2_files if get_cloudy_percent(read_udm(f)) < 0.00001] print(len(clear_udm2_files)) def get_id(udm2_filename): return udm2_filename.name.split('_3B')[0] clear_ids = [get_id(f) for f in clear_udm2_files] clear_ids[0] """ Explanation: Step 5: Filter by Cloudiness In this section, we will filter images that have any clouds within the AOI. We use the Unusable Data Mask (UDM2) to determine cloud pixels. 
End of explanation """ def get_img_path(img_id, file_paths): filename = '{}_3B_AnalyticMS_SR_clip_bandmath.tif'.format(img_id) return next(f for f in file_paths if f.name == filename) def read_ndvi(img_filename): with rasterio.open(img_filename) as img: # ndvi is a single-band image band = img.read(1) return band plot.show(read_ndvi(get_img_path(clear_ids[0], file_paths))) """ Explanation: Step 6: Get Clear Images End of explanation """ def get_udm2_path(img_id, file_paths): filename = '{}_3B_udm2_clip.tif'.format(img_id) return next(f for f in file_paths if f.name == filename) def read_blackfill(udm2_filename): with rasterio.open(udm2_filename) as img: # the last band is the udm band udm_band = img.read(8) blackfill = udm_band == int('1', 2) return blackfill plot.show(read_blackfill(get_udm2_path(clear_ids[0], file_paths))) # there is an issue where some udms aren't the same size as the images # to deal with this just cut off any trailing rows/columns # this isn't ideal as it can result in up to one pixel shift in x or y direction def crop(img, shape): return img[:shape[0], :shape[1]] def read_masked_ndvi(img_filename, udm2_filename): ndvi = read_ndvi(img_filename) blackfill = read_blackfill(udm2_filename) # crop image and mask to same size img_shape = min(ndvi.shape, blackfill.shape) ndvi = np.ma.array(crop(ndvi, img_shape), mask=crop(blackfill, img_shape)) return ndvi plot.show(read_masked_ndvi(get_img_path(clear_ids[0], file_paths), get_udm2_path(clear_ids[0], file_paths))) """ Explanation: The field AOI isn't an exact square so there are some blank pixels. Let's mask those out. We can use the UDM for that. End of explanation """ def read_masked_ndvi_by_id(iid, file_paths): return read_masked_ndvi(get_img_path(iid, file_paths), get_udm2_path(iid, file_paths)) plot.show(read_masked_ndvi_by_id(clear_ids[0], file_paths)) """ Explanation: That looks better! We now have the NDVI values for the pixels within the field AOI. 
Now, lets make that a little easier to generate.
End of explanation
"""

# Convenience wrapper: look up both the analytic image and its UDM2 mask for a
# given image id, then return the blackfill-masked NDVI array in one call.
def read_masked_ndvi_by_id(iid, file_paths):
    return read_masked_ndvi(get_img_path(iid, file_paths),
                            get_udm2_path(iid, file_paths))

plot.show(read_masked_ndvi_by_id(clear_ids[0], file_paths))

""" Explanation: In the images above, we are just using the default visualization for the imagery. But this is NDVI imagery. Values are given between -1 and 1. Let's see how this looks if we use visualization specivic to NDVI.
End of explanation
"""

# we demonstrated visualization in the best practices tutorial
# here, we save space by just importing the functionality
from visual import show_ndvi

# and here's what it looks like when we visualize as ndvi
# (data range -1 to 1). it actually looks worse because the
# pixel value range is so small
show_ndvi(read_masked_ndvi_by_id(clear_ids[0], file_paths))

""" Explanation: Well, the contrast has certainly gone down. This is because the NDVI values within the field are pretty uniform. That's what we would expect for a uniform field! So it is actually good news. The NDVI values are pretty low, ranging from 0.16 to just above 0.22. The time range used for this search is basically the month of April. This is pretty early in the growth season and so likely the plants are still tiny seedlings.
So even the low NDVI value makes sense here. Part 3: Sample Field Blocks Ok, here is where we convert pixels to tabular data. We do this for one image then we expand to doing this for all images in the time series. In this section, we want to sample the pixel values within each field block and put the values into a table. For this, we first need to identify the field block pixels. Next, we calculate the median, mean, variance, and random point value for each field block. We put those into a table. And at the end we visualize the results. Step 1: Get Field Block Pixels In this step, we find the pixel values that are associated with each field block. To get the field block pixels, we have to project the block geometries into the image coordinates. Then we create masks that just pull the field block pixels from the aoi. End of explanation """ np.random.seed(0) # 0 - make random sampling repeatable, no arg - nonrepeatable def random_mask_sample(mask, count): # get shape of unmasked pixels unmasked = mask == False unmasked_shape = mask[unmasked].shape # uniformly sample pixel indices num_unmasked = unmasked_shape[0] idx = np.random.choice(num_unmasked, count, replace=False) # assign uniformly sampled indices to False (unmasked) random_mask = np.ones(unmasked_shape, dtype=np.bool) random_mask[idx] = False # reshape back to image shape and account for image mask random_sample_mask = np.ones(mask.shape, dtype=np.bool) random_sample_mask[unmasked] = random_mask return random_sample_mask # lets just check out how our random sampling performs ndvi = read_masked_ndvi_by_id(clear_ids[0], file_paths) ndvi.mask = random_mask_sample(ndvi.mask, 13) plot.show(ndvi) ndvi = read_masked_ndvi_by_id(clear_ids[0], file_paths) ndvi.mask = random_mask_sample(ndvi.mask, 1300) plot.show(ndvi) """ Explanation: Step 2: Random Sampling Summary statistics such as mean, mode, and variance will be easy to calculate with the numpy python package. 
We need to do a little work to get random sampling, however. End of explanation """ def get_stats(ndvi, masks): def _get_stats(mask, block_number): block = np.ma.array(ndvi, mask=mask) mean = np.ma.mean(block) median = np.ma.median(block) var = np.ma.var(block) random_mask = random_mask_sample(block.mask, 1) random_val = np.ma.mean(np.ma.array(block, mask=random_mask)) return {'block': block_number, 'mean': mean, 'median': median, 'variance': var, 'random': random_val} data = [_get_stats(m, i) for i, m in enumerate(masks)] df = pd.DataFrame(data) return df ndvi = read_masked_ndvi_by_id(clear_ids[0], file_paths) get_stats(ndvi, block_masks) """ Explanation: Ok, great! The first image shows what would result from sampling 13 pixels. The second image is for nearly all the pixels and demonstrates that the mask is taken into account with sampling. Now lets get down to calculating the summary statistics and placing them in a table entry. Step 3: Prepare Table of Summary Statistics Now that we have all the tools we need, we are ready to calculate summary statistics for each field block and put them into a table. We will calculate the median, mean, variance, and single random point value for each field block. End of explanation """ def get_stats_by_id(iid, block_masks, file_paths): ndvi = read_masked_ndvi_by_id(iid, file_paths) ndvi_stats = get_stats(ndvi, block_masks) acquired = get_acquired(iid) ndvi_stats['acquired'] = [acquired]*len(block_masks) return ndvi_stats def get_acquired(iid): metadata_path = get_metadata(iid, file_paths) with open(metadata_path) as src: md = json.load(src) return md['properties']['acquired'] def get_metadata(img_id, file_paths): filename = '{}_metadata.json'.format(img_id) return next(f for f in file_paths if f.name == filename) get_stats_by_id(clear_ids[0], block_masks, file_paths) dfs = [get_stats_by_id(i, block_masks, file_paths) for i in clear_ids] all_stats = pd.concat(dfs) all_stats """ Explanation: Okay! 
We have statistics for each block in a table. Yay! Okay, now lets move on to running this across a time series. Step 4: Perform Time Series Analysis End of explanation """ colors = {0:'red', 1:'blue', 2:'green', 3:'black', 4:'purple'} df = all_stats stats = ['mean', 'median', 'random', 'variance'] fig, axes = plt.subplots(2, 2, sharex=True, figsize=(15,15)) # print(dir(axes[0][0])) for stat, ax in zip(stats, axes.flatten()): ax.scatter(df['acquired'], df[stat], c=df['block'].apply(lambda x: colors[x])) ax.set_title(stat) plt.sca(ax) plt.xticks(rotation=90) plt.show() """ Explanation: Okay! We have 165 rows, which is (number of blocks)x(number of images). It all checks out. Lets check out these stats in some plots! In these plots, color indicates the blocks. The blocks are colored red, blue, green, black, and purple. The x axis is acquisition time. So each 'column' of colored dots is the block statistic value for a given image. End of explanation """
igabr/Metis_Projects_Chicago_2017
05-project-kojack/Notebook_4_DataFrame_Creation_Modeling.ipynb
mit
%run helper_functions.py %run filters.py %run plotly_functions.py import quandl from datetime import date from tabulate import tabulate from collections import Counter from IPython.display import Image import math import string %matplotlib inline plt.rcParams["figure.figsize"] = (15,10) plt.rcParams["xtick.labelsize"] = 16 plt.rcParams["ytick.labelsize"] = 16 plt.rcParams["axes.labelsize"] = 20 plt.rcParams['legend.fontsize'] = 20 plt.style.use('fivethirtyeight') pd.set_option('display.max_colwidth', -1) import plotly.plotly as py import plotly.graph_objs as go import spacy nlp = spacy.load("en") nltk_stopwords = stopwords.words("english")+["rt", "via","-»","--»","--","---","-->","<--","->","<-","«--","«","«-","»","«»", " →", "→"] punc = '#!"%&\'()*+,-./:;<=>?@[\\]^_`{|}~' from nltk.sentiment.vader import SentimentIntensityAnalyzer from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet tweet_df = unpickle_object("clean_df_NB3_Complete.pkl") tweet_df.sort_values(by=["date", "hour_of_day"], ascending=False, inplace=True) tweet_df.reset_index(inplace=True) del tweet_df['index'] tweet_df['date'] = pd.to_datetime(tweet_df['date']) tweet_df.head() tweet_df.shape bitcoin_data = quandl.get("BCHARTS/BITSTAMPUSD", authtoken="Xyrsw1vVBaJx1p1z9dQ5", start_date="2016-10-26", end_date="2017-02-22") bitcoin_data.drop("Volume (BTC)", axis=1, inplace=True) bitcoin_data.reset_index(inplace=True) bitcoin_data.rename(columns={"Date":"date"}, inplace=True) bitcoin_data['date'] = pd.to_datetime(bitcoin_data['date']) bitcoin_data.sort_values(by="date", ascending=False, inplace=True) bitcoin_data.reset_index(inplace=True) del bitcoin_data['index'] bitcoin_data.head() bitcoin_data.shape gold_data = quandl.get("LBMA/GOLD", authtoken="Xyrsw1vVBaJx1p1z9dQ5", start_date="2016-10-26", end_date="2017-02-22") gold_data.drop(["USD (PM)", "GBP (AM)", "GBP (PM)", "EURO (AM)", "EURO (PM)"],axis=1, inplace=True) gold_data.reset_index(inplace=True) gold_data.rename(columns={"USD 
(AM)":"gold_price", "Date":"date"}, inplace=True) gold_data['date'] = pd.to_datetime(gold_data['date']) gold_data.sort_values(by="date", ascending=False, inplace=True) gold_data.reset_index(inplace=True) del gold_data['index'] gold_data.head() gold_data.shape eth_data = pd.read_csv("/Users/ibrahimgabr/Downloads/project-5/Data/ethereum_price.csv") eth_data.rename(columns={"Date":"date"}, inplace=True) eth_data['date'] = pd.to_datetime(eth_data['date']) eth_data.head() eth_data['eth_price'] = (eth_data['Open'] + eth_data['Close'])/2 #weighted price. mask = (eth_data['date'] > "2016-10-25") & (eth_data['date'] <= "2017-02-22") eth_data = eth_data[mask] eth_data.reset_index(inplace=True) del eth_data['index'] eth_data.drop(['Open', 'High', 'Low', 'Close', 'Volume', 'Market Cap'], axis=1, inplace=True) eth_data.head() eth_data.head() eth_data.shape tweet_and_gold = pd.merge(tweet_df, gold_data, on='date', how="outer") tweet_and_gold.head() final_dummy_df = pd.merge(tweet_and_gold, eth_data, on='date', how='outer') final_dummy_df.head() """ Explanation: Notebook 4 In this notebook, I will create the overall dataframe needed to start our modelling process. NOTE: The data used for BTC prices was later updated by using data from blockchain.info instead of the quandl API. Furthermore, in later notebooks, I add additional data from blockchain.info that may not be contained in the code below. 
End of explanation """ lemmatimzed_tweets = [] for i in range(final_dummy_df.shape[0]): tweet_text = final_dummy_df.iloc[i,1] tokenized = nlp(tweet_text) whole_tweet = [] for token in tokenized: if token.is_space: continue elif token.is_punct: continue elif token.text in nltk_stopwords: continue elif token.text in punc: continue elif token.is_stop: continue elif token.is_digit: continue else: whole_tweet.append(token.lemma_) tweet = " ".join(whole_tweet) lemmatimzed_tweets.append(tweet) for i in lemmatimzed_tweets[:5]: print(i) print() final_dummy_df['lemmatized_tweets'] = lemmatimzed_tweets final_dummy_df.head() pickle_object(final_dummy_df, "final_dummy_df_V1") final_df = unpickle_object("final_dummy_df_V1.pkl") final_df.head() final_df['hour_of_day']. """ Explanation: Lemmatization The code block below lemmatizes the tweets using the spaCy package! To lemmatize a word is to take it back to its root! i.e. {dancing, danced, dances} --> dance. End of explanation """ sentiment = SentimentIntensityAnalyzer() positive_sentiment = [] negative_sentiment = [] neutral_sentiment = [] compound_sentiment = [] for i in range(final_df.shape[0]): sent_dict = sentiment.polarity_scores(final_df.iloc[i, 5]) positive_sentiment.append(sent_dict['pos']) negative_sentiment.append(sent_dict['neg']) neutral_sentiment.append(sent_dict['neu']) compound_sentiment.append(sent_dict['compound']) final_df['pos_sent'] = positive_sentiment final_df['neg_sent'] = negative_sentiment final_df['neu_sent'] = neutral_sentiment final_df['compound_sent'] = compound_sentiment final_df.head() final_df.loc[:, ["pos_sent", "neg_sent", "neu_sent", "compound_sent"]].corr() percentage_missing(final_df) """ Explanation: Sentiment The code block below calculates the sentiment for every tweet in out dataframe. I use the VADER sentiment library! 
Documentation here: https://github.com/cjhutto/vaderSentiment End of explanation """ bitcoin_data = quandl.get("BCHARTS/BITSTAMPUSD", authtoken="Xyrsw1vVBaJx1p1z9dQ5", start_date="2016-10-26", end_date="2017-02-22") bitcoin_data.drop("Volume (BTC)", axis=1, inplace=True) bitcoin_data.reset_index(inplace=True) bitcoin_data.rename(columns={"Date":"date"}, inplace=True) bitcoin_data['date'] = pd.to_datetime(bitcoin_data['date']) bitcoin_data.sort_values(by="date", ascending=False, inplace=True) bitcoin_data.reset_index(inplace=True) del bitcoin_data['index'] bitcoin_data.head() complete_df = pd.merge(final_df, bitcoin_data, on='date', how='outer') complete_df.head() features = ["gold_price", "eth_price", "pos_sent", "neg_sent", "neu_sent", "compound_sent", "Open", "High", "Low", "Close", "Volume (Currency)", "Weighted Price"] modelling_df = complete_df.groupby("date").mean()[features] modelling_df.reset_index(inplace=True) modelling_df.sort_values(by='date', inplace=True) modelling_df.head() # pickle_object(complete_df, "final_dummy_df_V2") # pickle_object(modelling_df, "modelling_df_V1") """ Explanation: Let's now merge our bitcoin data! End of explanation """
jagarzone6/cmos
notebooks/CMOS- Taller 6 de Octubre.ipynb
mit
from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/fig-20-15.png')) """ Explanation: CMOS - Taller 6 de OCtubre Simulacion del circuito de la figura 20,15 (Beta-Multiplier) End of explanation """ from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/long-channel-mosfet.png')) from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/table-9-1.png')) """ Explanation: Caracteristicas usadas en las simulaciones: NIVEL=3, canal largo, tabla 9.1 End of explanation """ from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Fig_20_15_ngspice_Level_3.png')) """ Explanation: Simulacion en NGSPICE Archivo: Fig-20-15-LEVEL-3.cir Fig-20-15-LEVEL-3.cir Barrido de VDD de 0 a 10 V End of explanation """ from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Fig_20_15_ngspice_Level_3_transitory.png')) """ Explanation: Fig-20-15-LEVEL-3.cir Transitorio de 0 a 1n Segundo, condicines iniciales v(gateM1)=0 v(drainM2)=0 End of explanation """ from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Fig_20_15_ngspice_Level_3_transitory_2.png')) """ Explanation: Fig-20-15-LEVEL-3.cir Transitorio de 0 a 1n Segundo, condicines iniciales v(gateM1)=5 v(drainM2)=0 End of explanation """ from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Fig_20_15_electric_sch.png')) """ Explanation: Diseño en Electric Circuito en esquematico: End of explanation """ from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Fig_20_15_electric_sch_2.png')) """ Explanation: Circuito en esquematico para exportar a Spice: End of explanation """ from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Fig_20_15_electric_preferences.png')) """ Explanation: Edicion de las preferencias para seleccionar nivel 3 End of explanation """ from 
IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Fig_20_15_ltspice_sch.png')) """ Explanation: Simulacion en LTSpice, Archivo: Current_mirror_bias_SIM_long_channel.spi , Barrido de VDD de 0 a 6 V End of explanation """ from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Fig_20_15_electric_layout.png')) """ Explanation: Circuito en LayOut: End of explanation """ from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Fig_20_15_electric_layout_sim.png')) from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Fig_20_15_electric_layout_2.png')) """ Explanation: Circuito en LayOut para exportar a Spice: End of explanation """ from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Fig_20_15_ltspice_layout.png')) """ Explanation: Simulacion en LTSpice, Archivo: Current_mirror_bias_layout_SIM.spi , Barrido de VDD de 0 a 6 V End of explanation """ from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Fig_20_15_jelib.png')) """ Explanation: Libreria final, archivo: Fig-20-15-Sim.jelib End of explanation """ from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Fig_20_22.png')) """ Explanation: Simulacion del circuito de la figura 20,22 (Improved current reference ) End of explanation """ from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/short_channel_model.png')) from IPython.core.display import Image, display display(Image(url='images/taller-oct-6/Table_9_2.png')) """ Explanation: Caracteristicas usadas en las simulaciones: - NIVEL=54, canal corto, tabla 9.2 End of explanation """
matthiaskoenig/tellurium-web
api/api.ipynb
lgpl-3.0
BASE_URL = "http://127.0.0.1:8001" import os import coreapi import json import pandas as pd # some of the functionality requires authentication auth = coreapi.auth.BasicAuthentication( username='mkoenig', password=os.environ['DJANGO_ADMIN_PASSWORD'] ) client = coreapi.Client(auth=auth) # get the api scema document = client.get(BASE_URL + "/api/") print(document.title) print(document.url) """ Explanation: tellurium-web REST API Client The following tutorial demonstrates how to access tellurium-web via the python API. To run the notebook create a virtualenv with the requirements cd api mkvirtualenv teweb-api --python=python3 (teweb-api) pip install api-requirements.txt (teweb-api) python -m ipykernel install --user --name=teweb-api (teweb-api) jupyter notebook api.ipynb Alternatively you can install the dependency via pip pip install coreapi End of explanation """ print(document["archives"]) """ Explanation: COMBINE archives Overview over available functionality, keys in [] are optional params End of explanation """ print(document["tags"]) """ Explanation: Tags Overview over available functionality, keys in [] are optional params End of explanation """ print(document["users"]) """ Explanation: Users Overview over available functionality, keys in [] are optional params End of explanation """ data = client.action(document,["archives", "list"]) df = pd.read_json(json.dumps(data, indent=4)) print(df.head()) """ Explanation: Examples queries Get all archives End of explanation """ data = client.action(document,["archives", "list"], params={"tags": 1}) print(json.dumps(data, indent=4)) """ Explanation: Search for archives with given tag Uses the tag pk to search. 
End of explanation """ data = client.action(document,["tags", "list"]) df = pd.read_json(json.dumps(data, indent=4)) df """ Explanation: Get all tags End of explanation """ data = client.action(document,["tags", "list"], params={"type":"format"} ) # print(json.dumps(data,indent=4)) df = pd.read_json(json.dumps(data, indent=4)) df """ Explanation: Search for tags by type End of explanation """ data = client.action(document,["users", "list"]) df = pd.read_json(json.dumps(data, indent=4)) df """ Explanation: Get all users End of explanation """ data = client.action(document,["users", "list",], params={"search":"ja"} ) print(json.dumps(data,indent=4)) # search in archives for SBML data = client.action(document,["archives", "list"], params={"search":"sbml"} ) print(json.dumps(data, indent=4)) """ Explanation: search for users End of explanation """
mldbai/mldb
container_files/demos/Recommending Movies.ipynb
apache-2.0
from pymldb import Connection mldb = Connection() """ Explanation: Recommending Movies The MovieLens 20M dataset contains 20 million user ratings from 1 to 5 of thousands of movies. In this demo we'll build a simple recommendation system which will use this data to suggest 25 movies based on a seed movie you provide. The notebook cells below use pymldb's Connection class to make REST API calls. You can check out the Using pymldb Tutorial for more details. End of explanation """ %%bash mkdir -p /mldb_data/data curl "file://mldb/mldb_test_data/ml-20m.zip" 2>/dev/null > /mldb_data/data/ml-20m.zip unzip /mldb_data/data/ml-20m.zip -d /mldb_data/data %%bash head /mldb_data/data/ml-20m/README.txt %%bash head /mldb_data/data/ml-20m/ratings.csv """ Explanation: Download the MovieLens 20M data We'll start by using some command-line tools to download and decompress the data. End of explanation """ %%time print mldb.put('/v1/procedures/import_mvlns', { "type": "import.text", "params": { "dataFileUrl":"file:///mldb_data/data/ml-20m/ratings.csv", "outputDataset": "mvlns_ratings_csv", "runOnCreation": True } }) print mldb.put('/v1/procedures/process_mvlns', { "type": "transform", "params": { "inputData": """ select pivot(movieId, rating) as * named userId from mvlns_ratings_csv group by userId """, "outputDataset": "mvlns_ratings", "runOnCreation": True } }) """ Explanation: Load the data into MLDB See the Loading Data Tutorial guide for more details on how to get data into MLDB. Here we load a text file and use the pivot aggregator to create a sparse matrix representation of the ratings. End of explanation """ mldb.query("select * from mvlns_ratings limit 3") """ Explanation: Take a peek at the dataset We'll use the Query API. Each row is a user, each column is a movie, and the cell value is the rating the user gave the movie. 
End of explanation
"""

# Interactive movie title search: case-insensitive substring match against the
# `movies` dataset, returning matching titles (row names are movieIds).
# NOTE(review): the search term is interpolated directly into the query string,
# so regex metacharacters or quotes in `x` are passed through unescaped -- fine
# for an interactive demo, but confirm escaping before reusing this pattern on
# untrusted input.
@interact
def movie_search(x = "toy story"):
    return mldb.query("select title from movies where regex_match(lower(title), '.*%s.*')" % x.strip().lower())

""" Explanation: A simple search function to find all movies (and corresponding movieIds) whose names contain a string.
End of explanation """ print mldb.put("/v1/datasets/mvlns_user_prefs", {"type": "sparse.mutable"}) print mldb.put("/v1/functions/preferences", { "type": "sql.query", "params": { "query": "select {*} as p from mvlns_user_prefs where rowName()=$user" } }) def save_prefs(user_id, likes, dislikes): for rating, search_terms in zip([5,1],[likes, dislikes]): for x in search_terms.split(","): if len(x) > 3: mldb.post("/v1/datasets/mvlns_user_prefs/rows", { "rowName":user_id, "columns": [[str(m), rating, 0] for m in movie_search(x).index] }) mldb.post("/v1/datasets/mvlns_user_prefs/commit", {}) save_prefs("janedoe", "Toy Story", "Terminator") mldb.query("select preferences({ user: 'janedoe' })[p] as *") """ Explanation: Now let's create a dataset to hold user preferences, and a simple function to simulate a user rating movies they like and movies they dislike, based on the movie_search function above. End of explanation """ print mldb.put("/v1/functions/nearest_movies", { "type": "embedding.neighbors", "params": { "dataset": "mvlns_svd_embedding", "defaultNumNeighbors": 25, "columnName": "embedding" } }) print mldb.put("/v1/functions/recommendations", { "type": "sql.query", "params": { "query": """ select nearest_movies({ coords: mvlns_svd_embedder({ row: preferences({ user: $user })[p] })[embedding] })[distances] as r """ } }) """ Explanation: With all that done, we can now build a recommendation engine out of a simple SQL query by mapping a user's preferences into the same space as the movie embeddings (i.e. embedding the user's preferences) and looking for the nearest movies. 
End of explanation """ def recommend(likes="Toy Story, Terminator", dislikes="Star Trek"): # here we simulate a new user saving these preferences user_id = str(uuid4()) save_prefs(user_id, likes, dislikes) # we can then run an SQL query to: # - retrieve recommendations # - transpose and join them to movies to get titles # - exclude the already-rated movies from the result return mldb.query(""" select m.title named m.movieId from transpose(( select recommendations({ user: '%(user)s' }) )) as r join movies as m on r.rowPathElement(2) = m.rowPathElement(0) where m.movieId not in (keys of preferences({ user: '%(user)s' })[p]) order by r.result """ % dict(user=user_id)) recommend(likes="Toy Story, Terminator", dislikes="Star Trek") """ Explanation: Here's a simple function which lets you simulate the results of liking and disliking certain movies and getting back the resulting recommendations. End of explanation """ interact_manual(recommend) """ Explanation: Here's an interactive form that lets you play with this function to see if you agree with the recommendations! NOTE: the interactive part of this demo only works if you're running this Notebook live, not if you're looking at a static copy on http://docs.mldb.ai. See the documentation for Running MLDB. End of explanation """
SXBK/kaggle
mercedes-benz/Mercedes-Benz.ipynb
gpl-3.0
#Drop quantitative features for which most samples take 0 or 1 for cols in quan: if train_c[cols].mean() < 0.01 or train_c[cols].mean() > 0.99: train_c.drop(cols, inplace=True, axis=1) test_c.drop(cols, inplace=True, axis=1) #For now we only use the quantitative features left to make predictions quan_features = train_c.columns[8:-1] from sklearn.metrics import r2_score from sklearn.model_selection import GridSearchCV import warnings warnings.filterwarnings('ignore') """ Explanation: There is a lot of room for feature engineering the 8 qualitative features, but we'll reserve it for later End of explanation """ from sklearn.linear_model import Ridge ridge = Ridge() ridge_cv = GridSearchCV(estimator=ridge, param_grid={'alpha':np.arange(1, 50, 1)}, cv=5) ridge_cv.fit(train_c[quan_features], train_c.label) ridge_cv.best_score_ from sklearn.linear_model import Lasso lasso = Lasso() lasso_cv = GridSearchCV(estimator=lasso, param_grid={'alpha':np.arange(0, 0.05, 0.005)}, cv=5) lasso_cv.fit(train_c[quan_features], train_c.label) lasso_cv.best_score_ from sklearn.ensemble import RandomForestRegressor rf = RandomForestRegressor() params = {'max_depth':np.arange(5,8), 'min_samples_split':np.arange(3, 6)} rf_cv = GridSearchCV(estimator=rf, param_grid=params, cv=5) rf_cv.fit(train_c[quan_features], train_c.label) rf_cv.best_score_ from sklearn.linear_model import ElasticNet en = ElasticNet() params = {'alpha':np.arange(0.01, 0.05, 0.005), 'l1_ratio': np.arange(0.1, 0.9, 0.1)} en_cv = GridSearchCV(estimator=en, param_grid=params, cv=5) en_cv.fit(train_c[quan_features], train_c.label) en_cv.best_score_ from mlxtend.regressor import StackingRegressor from sklearn.linear_model import LinearRegression lin=LinearRegression() basic_regressors= [ridge_cv.best_estimator_, lasso_cv.best_estimator_, rf_cv.best_estimator_, en_cv.best_estimator_] stacker=StackingRegressor(regressors=basic_regressors, meta_regressor=lin) stacker.fit(train_c[quan_features], train_c.label) pred = 
stacker.predict(train_c[quan_features]) r2_score(train_c.label, pred) result = pd.DataFrame() result['ID']=test.ID result['y']=stacker.predict(test_c[quan_features]) result.to_csv('./stackedprediction.csv', index=False) """ Explanation: From now we try a range of estimators and use GridSearch to iteratively tune their hyperparameters End of explanation """
essicolo/GCI733-A2015
isothermes.ipynb
mit
%pylab inline def freundlich(C, kp, b): S = kp*C**b return(S) def langmuir(C, Smax, kp): S = C*kp*Smax/(1+kp*C) return(S) conc = linspace(num = 11, start = 0, stop = 10, endpoint=True) S_freundlich1 = freundlich(C = conc, kp = 1, b = 0.1) S_freundlich2 = freundlich(C = conc, kp = 1, b = 0.5) S_freundlich3 = freundlich(C = conc, kp = 1, b = 1) plot(conc, S_freundlich1, 'r-o', conc, S_freundlich2, 'b-o', conc, S_freundlich3, 'g-o') S_freundlich1 = freundlich(C = conc, kp = 1, b = 0.2) S_freundlich2 = freundlich(C = conc, kp = 2, b = 0.2) S_freundlich3 = freundlich(C = conc, kp = 3, b = 0.2) plot(conc, S_freundlich1, 'r-o', conc, S_freundlich2, 'b-o', conc, S_freundlich3, 'g-o') S_langmuir1 = langmuir(C = conc, kp = 0.5, Smax = 0.5) S_langmuir2 = langmuir(C = conc, kp = 1, Smax = 0.5) S_langmuir3 = langmuir(C = conc, kp = 2, Smax = 0.5) S_langmuir4 = langmuir(C = conc, kp = 5, Smax = 0.5) S_langmuir5 = langmuir(C = conc, kp = 10, Smax = 0.5) plot(conc, S_langmuir1, 'r-o', conc, S_langmuir2, 'b-o', conc, S_langmuir3, 'g-o', conc, S_langmuir4, 'y-o', conc, S_langmuir5, 'c-o') """ Explanation: Problème 1 Visualiser les isothermes de Freundlich et de Langmuir End of explanation """ data = abs(S_langmuir2 + 0.08 * np.random.randn(11)) plot(conc, data, 'ro') conc """ Explanation: Problème 2 Lisser des données simulées avec la fonction d'isotherme de Langmuir. Créer des données simulées End of explanation """ S_guess = langmuir(C = conc, Smax = 0.5, b = 1) plot(conc, data, 'ro', conc, S_guess, 'b-') from scipy.optimize import curve_fit fit = curve_fit(langmuir, conc, data, p0=(1, 0.5)) fit S_fit = langmuir(C = conc, Smax = fit[0][0], kp = fit[0][1]) plot(conc, data, 'ro', conc, S_fit, 'b-') """ Explanation: Lisser les données 1- Estimer des paramètres de départ 2- Lisser avec la fonciton curve_fit, incluse dans la librairie scipy, sous la section optimize End of explanation """
kubeflow/pipelines
components/google-cloud/google_cloud_pipeline_components/experimental/tensorflow_probability/anomaly_detection/tfp_anomaly_detection.ipynb
apache-2.0
import os # The Google Cloud Notebook product has specific requirements IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version") # Google Cloud Notebook requires dependencies to be installed with '--user' USER_FLAG = "" if IS_GOOGLE_CLOUD_NOTEBOOK: USER_FLAG = "--user" ! pip3 install {USER_FLAG} --upgrade kfp ! pip3 install {USER_FLAG} --upgrade google-cloud-pipeline-components ! pip3 install {USER_FLAG} --upgrade tensorflow ! pip3 install {USER_FLAG} --upgrade matplotlib ! pip3 install {USER_FLAG} --upgrade numpy ! pip3 install {USER_FLAG} --upgrade pandas """ Explanation: <table align="left"> <td> <a href="https://colab.research.google.com/github/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/experimental/tensorflow_probability/anomaly_detection/tfp_anomaly_detection.ipynb""> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab </a> </td> <td> <a href="https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/experimental/tensorflow_probability/anomaly_detection/tfp_anomaly_detection.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> </table> Anomaly Detection with TensorFlow Probability STS on Kubeflow Pipelines Overview This notebook demonstrates how to use TensorFlow Probability and Kubeflow Pipelines for anomaly detection in time series data. It uses structural time series (STS), a class of Bayesian statistical models, to decompose a time series into interpretable seasonal and trend components. This algorithm fits an STS model to the time series, generates a forecast of acceptable values for each timestep, and flags any points outside of the forecast as an anomaly. To learn more about STS models, check out this demo on Structural Time Series Modeling Case Studies. 
This demo is most relevant for those who would like to automatically flag anomalies in time series data and can be used for applications like network monitoring, infrastructure maintenance, and sales tracking. Dataset This demo uses the Numenta Anomaly Benchmark, a popular benchmark of time series data with labeled anomalies. More specifically, our demo uses nyc_taxi.csv which reports the total number of passengers in NYC taxis from July 2014 to January 2015 in 30-minute increments. Objective You will go through the following steps: * Define and launch an anomaly detection algorithm on Kubeflow Pipelines. * Retrieve and visualize results. * Benchmark predictions using the Numenta Anomaly Benchmark scoring method. Costs This tutorial uses billable components of Google Cloud: Vertex AI Cloud Storage Learn about Vertex AI pricing and Cloud Storage pricing, and use the Pricing Calculator to generate a cost estimate based on your projected usage. Set up your local development environment If you are using Colab or Google Cloud Notebooks, your environment already meets all the requirements to run this notebook. You can skip this step. Otherwise, make sure your environment meets this notebook's requirements. You need the following: The Google Cloud SDK Git Python 3 virtualenv Jupyter notebook running in a virtual environment with Python 3 The Google Cloud guide to Setting up a Python development environment and the Jupyter installation guide provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions: Install and initialize the Cloud SDK. Install Python 3. Install virtualenv and create a virtual environment that uses Python 3. Activate the virtual environment. To install Jupyter, run pip3 install jupyter on the command-line in a terminal shell. To launch Jupyter, run jupyter notebook on the command-line in a terminal shell. Open this notebook in the Jupyter Notebook Dashboard. 
Install additional packages Install additional package dependencies not installed in your notebook environment. End of explanation """ # Automatically restart kernel after installs import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) """ Explanation: Restart the kernel After you install the additional packages, you need to restart the notebook kernel so it can find the packages. End of explanation """ import os # Get your Google Cloud project ID from gcloud if not os.getenv("IS_TESTING"): shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID: ", PROJECT_ID) """ Explanation: Before you begin Set up your Google Cloud project The following steps are required, regardless of your notebook environment. Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs. Make sure that billing is enabled for your project. Enable the Vertex AI API, Cloud Build API, Cloud Storage API, and Container Registry API. If you are running this notebook locally, you will need to install the Cloud SDK. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands. Set your project ID If you don't know your project ID, you may be able to get your project ID using gcloud. End of explanation """ if PROJECT_ID == "" or PROJECT_ID is None: PROJECT_ID = "[your-project-id]" # @param {type:"string"} !gcloud config set project {PROJECT_ID} """ Explanation: Otherwise, set your project ID here. 
End of explanation """ from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") """ Explanation: Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial. End of explanation """ import os import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. # The Google Cloud Notebook product has specific requirements IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version") # If on Google Cloud Notebooks, then don't execute this code if not IS_GOOGLE_CLOUD_NOTEBOOK: if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' """ Explanation: Authenticate your Google Cloud account If you are using Google Cloud Notebooks, your environment is already authenticated. Skip this step. If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. Otherwise, follow these steps: In the Cloud Console, go to the Create service account key page. Click Create service account. In the Service account name field, enter a name, and click Create. In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex AI" into the filter box, and select Vertex AI Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin. 
Click Create. A JSON file that contains your key downloads to your local environment. Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell. End of explanation """ BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} REGION = "[your-region]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP """ Explanation: Create a Cloud Storage bucket The following steps are required, regardless of your notebook environment. When you submit a training job, Vertex AI saves all resources to the given GCS bucket. We will also use the same bucket to download and host the input data. Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets. You may also change the REGION variable, which is used for operations throughout the rest of this notebook. Make sure to choose a region where Vertex AI services are available. You may not use a Multi-Regional Storage bucket for training with Vertex AI. End of explanation """ ! gsutil mb -l $REGION $BUCKET_NAME """ Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket. End of explanation """ ! 
gsutil ls -al $BUCKET_NAME """ Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents: End of explanation """ PIPELINE_NAME = '{0}-{1}'.format('tfp-anomaly-detection', TIMESTAMP) PIPELINE_ROOT = '{0}/{1}'.format(BUCKET_NAME, PIPELINE_NAME) from typing import Callable, Optional, Mapping, Any import kfp from kfp.v2 import compiler from kfp.v2 import dsl from kfp.v2.google.client import AIPlatformClient from kfp.v2.dsl import Input, Output, Dataset """ Explanation: Import libraries and define constants End of explanation """ preprocess_op = kfp.components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/google-cloud/google_cloud_pipeline_components/experimental/tensorflow_probability/anomaly_detection/preprocess.yaml') anomaly_detection_op = kfp.components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/google-cloud/google_cloud_pipeline_components/experimental/tensorflow_probability/anomaly_detection/component.yaml') postprocess_op = kfp.components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/google-cloud/google_cloud_pipeline_components/experimental/tensorflow_probability/anomaly_detection/postprocess.yaml') """ Explanation: Define the anomaly detection components Here you will load components from the anomaly_detection folder in the Google Cloud Pipeline Components SDK. You can also save and modify the original Python component file. For example, for tfp_anomaly_detection.py: Call generate_component_file() which creates a yaml file. Replace the next cell with anomaly_detection_op = kfp.components.load_component_from_file('component.yaml') The components do the following: * preprocess: Regularizes and resamples a time series. 
* tfp_anomaly_detection: Infers the structure of the time series, fits the model, and identifies anomalies based on the predictive distribution of acceptable values at each timestep. * postprocess: Fills missing values from regularizing and resampling. End of explanation """ @dsl.pipeline( pipeline_root=PIPELINE_ROOT, name=PIPELINE_NAME) def pipeline(input_url: str, memory_limit: str, seed: int) -> None: """ Train model and return detected anomalies. """ input_task = kfp.dsl.importer( artifact_uri=input_url, artifact_class=Dataset) preprocess_task = preprocess_op(input_dataset=input_task.output) anomaly_detection_task = anomaly_detection_op(input_dataset=preprocess_task.output, seed=seed).set_memory_limit(memory_limit) postprocess_op(input_dataset=input_task.output, predictions_dataset=anomaly_detection_task.output) def run_pipeline(pipeline: Callable, parameter_values: Optional[Mapping[str, Any]] = {}, enable_caching: bool = False) -> None: """Runs a given pipeline function using Kubeflow Pipelines. Args: pipeline: The function to run. parameter_values: Parameters passed to the pipeline function when run. enable_caching: Whether to used cached results from previous runs. """ compiler.Compiler().compile( pipeline_func=pipeline, package_path='{}_pipeline.json'.format(PIPELINE_NAME)) api_client = AIPlatformClient( project_id=PROJECT_ID, region=REGION, ) _ = api_client.create_run_from_job_spec( job_spec_path='{}_pipeline.json'.format(PIPELINE_NAME), pipeline_root=PIPELINE_ROOT, parameter_values=parameter_values, enable_caching=enable_caching) """ Explanation: Define the pipeline Here you will define the relationship between the components and how data is passed. In this pipeline a Google Cloud Storage csv is imported, the data is preprocessed, anomalies are flagged, and the results are postprocessed so that the output csv is scoreable by the Numenta Anomaly Benchmark. 
End of explanation """ import os NAB_DATA_BLOB = '{0}/NAB'.format(BUCKET_NAME) if not os.path.exists('content/NAB'): !git clone https://github.com/numenta/NAB !gsutil cp -r NAB/data $NAB_DATA_BLOB # Find the full file path in gcs for the chosen task import tensorflow as tf chosen_task_folder = 'realKnownCause' chosen_task = 'nyc_taxi' nab_files = tf.io.gfile.glob('{0}/*/*.csv'.format(NAB_DATA_BLOB)) chosen_task_file = [file for file in nab_files if chosen_task in file][0] print('The pipeline will be run on the task: {0}'.format(chosen_task)) """ Explanation: Download the data Here you will download the Numenta Anomaly Benchmark and upload the dataset to your GCS bucket. We will then find the exact GCS file url associated with the chosen task to pass as the input url into the pipeline. End of explanation """ parameter_values = { 'input_url': chosen_task_file, 'memory_limit': '50G', 'seed': 0, } run_pipeline(pipeline, parameter_values=parameter_values) """ Explanation: Run the pipeline Finally, we run the pipeline. Please wait until the run has completed before proceeding to the next steps. End of explanation """ import pandas as pd import numpy as np import json gcs_file = '[your-pipeline-output]' # @param {type:'string'} output_file = '/content/{0}-{1}.csv'.format(chosen_task, TIMESTAMP) !gsutil cp $gcs_file $output_file # Collect targets specifically for the chosen task targets = json.load(open('/content/NAB/labels/combined_labels.json')) chosen_task_targets = [targets[key] for key in targets if chosen_task in key][0] """ Explanation: Download the results locally Copy the GCS file path from the final postprocess step of the pipeline below. Here we will save this output locally for visualization and scoring. 
End of explanation """ #@title Plotting setup from matplotlib import pylab as plt from matplotlib.lines import Line2D def plot_predictions(predictions: pd.DataFrame, annotation_fn: Callable = lambda timestamp: timestamp) -> None: """ Plots the time series, forecast, detected anomalies, and residuals. Args: predictions: The output of the anomaly detection algorithm. """ # Drop NaN values during plotting predictions = predictions.dropna(how='any') predictions = predictions.reset_index() timestamp = pd.to_datetime(predictions['timestamp'], format='%Y-%m-%d') # Plot the value from predictions which may be # an aggregation of the original value value = np.array(predictions['value_predictions']) lower_limit = np.array(predictions['lower_limit']) upper_limit = np.array(predictions['upper_limit']) mean = np.array(predictions['mean']) anomalies = np.array(predictions['label']).nonzero()[0] targets = [] if 'target' in predictions: targets = np.array(predictions['target']).nonzero()[0] fig = plt.figure(figsize=(10, 5), constrained_layout=True) spec = fig.add_gridspec(ncols=1, nrows=2, height_ratios=[2., 1.]) series_ax = fig.add_subplot(spec[0, 0]) residuals_ax = fig.add_subplot(spec[1, 0], sharex=series_ax) # Plot anomalies on series_ax series_ax.plot( timestamp, value, color='black', alpha=0.6) series_ax.fill_between( timestamp, lower_limit, upper_limit, color='tab:blue', alpha=0.3) for anomaly_idx in anomalies: x = timestamp[anomaly_idx] y = value[anomaly_idx] series_ax.scatter(x, y, s=100, alpha=0.4, c='red') for target_idx in targets: x = timestamp[target_idx] y = value[target_idx] series_ax.scatter(x, y, s=100, alpha=0.4, c='green') series_ax.annotate(annotation_fn(x), (x, y)) # Plot residuals on residuals_ax time_delta = timestamp[1] - timestamp[0] residuals_ax.bar( timestamp, height=upper_limit - lower_limit, bottom=lower_limit - mean, width=time_delta, align='center', color='tab:blue', alpha=0.3) residuals_ax.bar( timestamp, width=time_delta, height=value - mean, 
align='center', color='black', alpha=0.6) # Set up grid styling series_ax.set_ylabel('Original series') residuals_ax.set_ylabel('Residuals') series_ax.grid(True, color='whitesmoke') residuals_ax.grid(True, color='whitesmoke') series_ax.set_axisbelow(True) residuals_ax.set_axisbelow(True) # Add title and legend series_ax.set_title('TFP STS model forecast, anomalies, and residuals for {0}'.format(chosen_task)) create_legend_label = lambda label, color: Line2D([0], [0], marker='o', color='w', label=label, markerfacecolor=color, markersize=10) legend_elements = [create_legend_label(label, color) for label, color in [('predicted anomaly', 'red'), ('target', 'green')]] series_ax.legend(handles=legend_elements, loc='lower right') # Round target timestamps to day for plotting round_to_day = lambda timestamp: timestamp.split()[0] rounded_targets = [round_to_day(timestamp) for timestamp in chosen_task_targets] rounded_targets = set(rounded_targets) predictions = pd.read_csv(output_file) predictions['target'] = predictions.apply(lambda df: round_to_day(df['timestamp']) in rounded_targets, axis=1) # Change the start and end to view different slices of the prediction start, end = 8000, 9000 round_annotation = lambda timestamp: timestamp.date() plot_predictions(predictions.iloc[start:end], round_annotation) """ Explanation: Visualize the results Here we will plot the forecast distribution outputted by the pipeline, the points flagged as anomalies (red), and the ground truth targets (green). The graph is plotted with daily granularity due to the resampling done during preprocessing. Note how the algorithm correctly identifies December 25th as an anomaly. End of explanation """ # Set up NAB folder for running scoring %cd /content/NAB !pip install . 
--user !python scripts/create_new_detector.py --detector $PIPELINE_NAME # Move gcs output into the NAB results folder structure results_file = 'results/{0}/{1}/{0}_{2}.csv'.format(PIPELINE_NAME, chosen_task_folder, chosen_task) !cp $output_file $results_file # Run the scoring script !python run.py -d $PIPELINE_NAME --optimize --score --normalize #@title Score collection and normalization setup import glob def collect_scores(profile_name: str, chosen_task: str) -> pd.DataFrame: """Crawls through results files for all detectors in NAB to get results for the chosen task. Args: profile_name: One of 'standard', 'low_FP_rate', 'low_FN_rate'. chosen_task: The chosen benchmark task. Returns: all_scores_df: A pandas DataFrame of results for the task sorted by highest to lowest score. """ all_scores = [] for scores_file in glob.glob('/content/NAB/results/**/*_{0}_scores.csv'.format(profile_name)): scores_df = pd.read_csv(scores_file) chosen_task_row = scores_df[scores_df['File'].str.contains(chosen_task).fillna(False)] all_scores.append(chosen_task_row) all_scores_df = pd.concat(all_scores) all_scores_df = all_scores_df.sort_values(by=['Score'], ascending=False) all_scores_df = all_scores_df.reset_index().drop('index', axis=1) return all_scores_df def normalize_scores(results: pd.DataFrame, profile_name: str, profiles: dict, tpCount: int) -> pd.DataFrame: """Normalizes scores with the max from a perfect detector and the min from a null detector. Args: results: Pandas DataFrame with score results. profile_name: One of 'standard', 'low_FP_rate', 'low_FN_rate'. profiles: Dictionary containing cost matrix for each profile. tpCount: The number of true positives in the ground truth targets. Returns: The results DataFrame with an added column of normalized scores. 
""" perfect = tpCount * profiles[profile_name]["CostMatrix"]["tpWeight"] # Note that the null detector's name is NaN in the `Detector` column base = results[pd.isna(results['Detector'])]['Score'].iloc[0] scores = results['Score'] results['Normalized_Score'] = 100 * (scores - base) / (perfect - base) # Reindex column order for more organized table columns = results.columns.to_list() columns.remove('Score') columns.remove('Normalized_Score') columns += ['Score', 'Normalized_Score'] results = results.reindex(columns=columns) print('Normalization used min raw score: {0} and max raw score: {1}'.format(base, perfect)) return results """ Explanation: Run scoring Here we quantitatively score the algorithm's performance on the Numenta Anomaly Benchmark. The benchmark uses a custom scoring mechanism described in their paper. Unlike precision and recall which do not reward for early detection, this scoring mechanism rewards based on windows around anomalous points rather than the exact points themselves. We will run the scoring script with the --optimize flag, which uses the anomaly_scores column to score and optimizes the decision threshold. If this flag is omitted, then the script will only use the label column originally outputted by the component. End of explanation """ tpCount = len(chosen_task_targets) profile_name = 'standard' profiles = json.load(open('/content/NAB/config/profiles.json')) profiles results = collect_scores(profile_name, chosen_task) results = normalize_scores(results, profile_name, profiles, tpCount) results """ Explanation: NAB also provides scores for three profile settings: standard, reward_low_FN_rate, and reward_low_FP_rate. If you run the cell below you can see the cost matrix for each profile, where reward_low_FN_rate penalizes false negatives more and reward_low_FP_rate penalizes false positives more. 
For example, if for the NYC Taxi & Limousine Commission it is worse to not have enough taxis during a big event than it is to have too many, then they may want to score based on a reward_low_FN_rate profile. For the purposes of this demo we will only display results for the standard profile. End of explanation """
neurodata/ndmg
tutorials/Tutorial_For_QA_Registration.ipynb
apache-2.0
import os import nibabel as nb import matplotlib.image as mpimg from m2g.utils.gen_utils import get_braindata, get_filename from m2g.utils.qa_utils import get_min_max, opaque_colorscale, pad_im from argparse import ArgumentParser from scipy import ndimage from matplotlib.colors import LinearSegmentedColormap from nilearn.plotting.edge_detect import _edge_map as edge_map import matplotlib.pyplot as plt %matplotlib inline """ Explanation: QA REGISTRATION Give a visual result for regisration part to help user check results of each step. The graph includes three dimensions(sagittal slice, coronal slice and axial slice) and uses pink and green to indicate the registered output and reference image. Setup Steps First we have to import all relevant functions used in this qa code. qa_reg uses m2g functions from qa_utils and gen_utils. End of explanation """ def reg_mri_pngs( mri, atlas, outdir, loc=0, mean=False, minthr=2, maxthr=95, edge=False ): """ A function to create and save registered brain slice figures. Parameter --------- mri: nifti file the registered brain file generated in each registration step. atlas: nifti file the reference brain file used in each registration step. outdir: str directory where output png file is saved. 
loc: int which dimension of the 4d brain data to use mean: bool whether to calculate the mean of the 4d brain data If False, the loc=0 dimension of the data (mri_data[:, :, :, loc]) is used minthr: int lower percentile threshold maxthr: int upper percentile threshold """ atlas_data = nb.load(atlas).get_data() mri_data = nb.load(mri).get_data() if mri_data.ndim == 4: # 4d data, so we need to reduce a dimension if mean: mr_data = mri_data.mean(axis=3) else: mr_data = mri_data[:, :, :, loc] else: # dim=3 mr_data = mri_data cmap1 = LinearSegmentedColormap.from_list("mycmap1", ["white", "magenta"]) cmap2 = LinearSegmentedColormap.from_list("mycmap2", ["white", "green"]) fig = plot_overlays(atlas_data, mr_data, [cmap1, cmap2], minthr, maxthr, edge) # name and save the file fig.savefig(outdir + "/" + "{"+get_filename(mri) +"}"+ "2" +"{"+get_filename(atlas)+"}"+ ".png", format="png") #plt.close() """ Explanation: Saving figures Save the result returned by function plot_overlays and name the file as "{registered image}2{reference image}". End of explanation """ def plot_overlays(atlas, b0, cmaps=None, minthr=2, maxthr=95, edge=False): """ A function to plot the overlay figures of registered and reference brain slices. Parameter --------- atlas: str, nifti image, numpy.ndarray an object to open the data for a registered brain. Can be a string (path to a brain file), nibabel.nifti1.nifti1image, or a numpy.ndarray. b0: str, nifti image, numpy.ndarray an object to open the data for a reference brain. Can be a string (path to a brain file), nibabel.nifti1.nifti1image, or a numpy.ndarray. cmap: Colormap objects based on lookup tables using linear segments. minthr: int lower percentile threshold maxthr: int upper percentile threshold edge: bool whether to use normalized luminance data If None, the respective min and max of the color array is used. 
Returns --------- foverlay: matplotlib.figure.Figure """ plt.rcParams.update({"axes.labelsize": "x-large", "axes.titlesize": "x-large"}) foverlay = plt.figure() atlas = get_braindata(atlas) b0 = get_braindata(b0) if atlas.shape != b0.shape: raise ValueError("Brains are not the same shape.") if cmaps is None: cmap1 = LinearSegmentedColormap.from_list("mycmap1", ["white", "magenta"]) cmap2 = LinearSegmentedColormap.from_list("mycmap2", ["white", "green"]) cmaps = [cmap1, cmap2] if b0.shape == (182, 218, 182): x = [78, 90, 100] y = [82, 107, 142] z = [88, 103, 107] else: brain_volume = b0.shape x = [int(brain_volume[0] * 0.35), int(brain_volume[0] * 0.51), int(brain_volume[0] * 0.65)] y = [int(brain_volume[1] * 0.35), int(brain_volume[1] * 0.51), int(brain_volume[1] * 0.65)] z = [int(brain_volume[2] * 0.35), int(brain_volume[2] * 0.51), int(brain_volume[2] * 0.65)] coords = (x, y, z) atlas = pad_im(atlas, max(brain_volume[0:3]), 0, False) b0 = pad_im(b0, max(brain_volume[0:3]), 0, False) x = [int(max(brain_volume[0:3]) * 0.35), int(max(brain_volume[0:3]) * 0.51), int(max(brain_volume[0:3]) * 0.65)] y = [int(max(brain_volume[0:3]) * 0.35), int(max(brain_volume[0:3]) * 0.51), int(max(brain_volume[0:3]) * 0.65)] z = [int(max(brain_volume[0:3]) * 0.35), int(max(brain_volume[0:3]) * 0.51), int(max(brain_volume[0:3]) * 0.65)] coords = (x, y, z) labs = [ "Sagittal Slice", "Coronal Slice", "Axial Slice", ] var = ["X", "Y", "Z"] # create subplot for first slice # and customize all labels idx = 0 if edge: min_val = 0 max_val = 1 else: min_val, max_val = get_min_max(b0, minthr, maxthr) for i, coord in enumerate(coords): for pos in coord: idx += 1 ax = foverlay.add_subplot(3, 3, idx) ax.set_title(var[i] + " = " + str(pos)) if i == 0: image = ndimage.rotate(b0[pos, :, :], 90) atl = ndimage.rotate(atlas[pos, :, :], 90) elif i == 1: image = ndimage.rotate(b0[:, pos, :], 90) atl = ndimage.rotate(atlas[:, pos, :], 90) else: image = b0[:, :, pos] atl = atlas[:, :, pos] if idx % 3 == 1: 
ax.set_ylabel(labs[i]) ax.yaxis.set_ticks([0, image.shape[0] / 2, image.shape[0] - 1]) ax.xaxis.set_ticks([0, image.shape[1] / 2, image.shape[1] - 1]) if edge: image = edge_map(image).data image[image > 0] = max_val image[image == 0] = min_val #Set the axis invisible plt.xticks([]) plt.yticks([]) #Set the frame invisible ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.spines['left'].set_visible(False) ax.imshow(atl, interpolation="none", cmap=cmaps[0], alpha=0.9) ax.imshow( opaque_colorscale( cmaps[1], image, alpha=0.9, vmin=min_val, vmax=max_val ) ) #set the legend if idx == 3: plt.plot(0,0,"-",c="pink",label='registered') plt.plot(0,0,"-",c="green",label='reference') plt.legend(loc='best',fontsize=12,bbox_to_anchor=(1.5,1.5)) #Set title for the whole picture a,b,c = brain_volume title = 'QA For Registration. Brain Volume:'+ str(a) +'*'+ str(b) + '*' + str(c)+'\n' foverlay.suptitle(title,fontsize=24) foverlay.set_size_inches(12.5, 10.5, forward=True) foverlay.tight_layout() return foverlay """ Explanation: Generating the qa overlay figures First overlap the registered image and the reference image, then select the appropriate position to cut nine slices {3 slices each from sagittal, axial and coronal} and return the figure foverlay. End of explanation """ mri = r"/Users/xueminzhu/Desktop/test/t1w_aligned_mni.nii.gz" atlas = r"/Users/xueminzhu/Desktop/test/MNI152_T1_2mm_brain.nii.gz" output_dir = r"/Users/xueminzhu/Desktop/test" reg_mri_pngs(mri, atlas,output_dir) """ Explanation: Input and Graphs Change location of mri, atlas and output_dir after running m2g on desired dataset to your output folder Anatomical Registration Parameters: mri: str the registered output of the intermediate steps of the registration. atlas: str the reference image used in Registration step. output_dir: str the str where you save the output png file. 
End of explanation """ mri = r"/Users/xueminzhu/Desktop/test/desikan_space-MNI152NLin6_res-2x2x2_reor_RAS_nores_aligned_atlas.nii.gz" atlas = r"/Users/xueminzhu/Desktop/test/nodif_B0.nii.gz" output_dir = r"/Users/xueminzhu/Desktop/test" reg_mri_pngs(mri, atlas,output_dir) """ Explanation: This is a good example of well-registed images where pink and green are used to represent the registered output and reference image. The registration step of this image is first using function flirt and then fnirt in FSL. Atlas Registration Parameters: mri: str the registered output of the final step of the registration. atlas: str the reference image used in this registration step. output_dir: str the str where you save the output png file. End of explanation """ bad_example = r"/Users/xueminzhu/Desktop/test/example.png" display(Image.open(bad_example)) """ Explanation: This is a final image of the registration step where pink and green are used to represent registered output and reference image. A bad output may be missing some slices of the brain or too small, which may be due to the small volume of the picture. The following figure is an example. End of explanation """
ucsd-ccbb/jupyter-genomics
notebooks/rnaSeq/Functional_Enrichment_Analysis_Pathway_Visualization.ipynb
mit
#Import Python modules import os import pandas import qgrid import mygene #Change directory os.chdir("/data/test") """ Explanation: ToppGene & Pathway Visualization Authors: N. Mouchamel, L. Huang, T. Nguyen, K. Fisch Email: Kfisch@ucsd.edu Date: June 2016 Goal: Create Jupyter notebook that runs an enrichment analysis in ToppGene through the API and runs Pathview to visualize the significant pathways outputted by ToppGene. toppgene website: https://toppgene.cchmc.org/enrichment.jsp Steps: 1. Read in differentially expressed gene list. 2. Convert differentially expressed gene list to xml file as input to ToppGene API. 3. Run enrichment analysis of DE genes through ToppGene API. 4. Parse ToppGene API results from xml to csv and Pandas data frame. 5. Display results in notebook. 6. Extract just the KEGG pathwway IDs from the ToppGene output. 7. Manually switch from Python2 to R kernel. 8. Extract entrez ID and log2FC from the input DE genes. 9. Create vector of significant pathways from ToppGene. 10. Run Pathview (https://bioconductor.org/packages/release/bioc/html/pathview.html) in R to create colored pathway maps. 11. Manually switch from R kernel to Python2. 12. Display each of the significant pathway colored overlay diagrams in the jupyter notebook. 
End of explanation """ #Read in DESeq2 results genes=pandas.read_csv("DE_genes.csv") #View top of file genes.head(10) #Extract genes that are differentially expressed with a pvalue less than a certain cutoff (pvalue < 0.05 or padj < 0.05) genes_DE_only = genes.loc[(genes.pvalue < 0.05)] #View top of file genes_DE_only.head(10) #Check how many rows in original genes file len(genes) #Check how many rows in DE genes file len(genes_DE_only) """ Explanation: Read in differential expression results as a Pandas data frame to get differentially expressed gene list End of explanation """ #Extract list of DE genes (Check to make sure this code works, this was adapted from a different notebook) de_list = genes_DE_only[genes_DE_only.columns[0]] #Remove .* from end of Ensembl ID de_list2 = de_list.replace("\.\d","",regex=True) #Add new column with reformatted Ensembl IDs genes_DE_only["Full_Ensembl"] = de_list2 #View top of file genes_DE_only.head(10) #Set up mygene.info API and query mg = mygene.MyGeneInfo() gene_ids = mg.getgenes(de_list2, 'name, symbol, entrezgene', as_dataframe=True) gene_ids.index.name = "Ensembl" gene_ids.reset_index(inplace=True) #View top of file gene_ids.head(10) #Merge mygene.info query results with original DE genes list DE_with_ids = genes_DE_only.merge(gene_ids, left_on="Full_Ensembl", right_on="Ensembl", how="outer") #View top of file DE_with_ids.head(10) #Write results to file DE_with_ids.to_csv("./DE_genes_converted.csv") #Dataframe to only contain gene symbol DE_with_ids=pandas.read_csv("./DE_genes_converted.csv") cols = DE_with_ids.columns.tolist() cols.insert(0, cols.pop(cols.index('symbol'))) for_xmlfile = DE_with_ids.reindex(columns= cols) #Condense dataframe to contain only gene symbol for_xmlfile.drop(for_xmlfile.columns[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,12,13,14]], axis=1, inplace=True) #Exclude NaN values for_xmlfile.dropna(axis=0, how='any', thresh=None, subset=None, inplace=True) #View top of file for_xmlfile.head(10) #Write results 
to file for_xmlfile.to_csv("./for_xmlfile.csv", index=False) #.XML file generator from gene list in .csv file import xml.etree.cElementTree as ET import xml.etree.cElementTree as ElementTree import lxml #Root element of .xml "Tree" root=ET.Element("requests") #Title/identifier for the gene list inputted into ToppGene API #Name it whatever you like doc=ET.SubElement(root, "toppfun", id= "nicole's gene list") config=ET.SubElement(doc, "enrichment-config") gene_list=ET.SubElement(doc, "trainingset") gene_list.set('accession-source','HGNC') #For gene symbol in gene_list #Parse through gene_list to create the .xml file toppgene = pandas.read_csv("./for_xmlfile.csv") for i in toppgene.ix[:,0]: gene_symbol = i gene = ET.SubElement(gene_list, "gene") gene.text= gene_symbol tree = ET.ElementTree(root) #Function needed for proper indentation of the .xml file def indent(elem, level=0): i = "\n" + level*" " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: indent(elem, level+1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i indent(root) import xml.dom.minidom from lxml import etree #File to write the .xml file to #Include DOCTYPE with open('/data/test/test.xml', 'w') as f: f.write('<?xml version="1.0" encoding="UTF-8" ?><!DOCTYPE requests SYSTEM "https://toppgene.cchmc.org/toppgenereq.dtd">') ElementTree.ElementTree(root).write(f, 'utf-8') #Display .xml file xml = xml.dom.minidom.parse('/data/test/test.xml') pretty_xml_as_string = xml.toprettyxml() print(pretty_xml_as_string) """ Explanation: Translate Ensembl IDs to Gene Symbols and Entrez IDs using mygene.info API End of explanation """ !curl -v -H 'Content-Type: text/xml' --data @/data/test/test.xml -X POST https://toppgene.cchmc.org/api/44009585-27C5-41FD-8279-A5FE1C86C8DB > /data/test/testoutfile.xml #Display output .xml file import 
xml.dom.minidom xml = xml.dom.minidom.parse("/data/test/testoutfile.xml") pretty_xml_as_string = xml.toprettyxml() print(pretty_xml_as_string) """ Explanation: Run ToppGene API Include path for the input .xml file and path and name of the output .xml file. Outputs all 17 features of ToppGene. End of explanation """ import xml.dom.minidom import pandas as pd import numpy #Parse through .xml file def load_parse_xml(data_file): """Check if file exists. If file exists, load and parse the data file. """ if os.path.isfile(data_file): print "File exists. Parsing..." data_parse = ET.ElementTree(file=data_file) print "File parsed." return data_parse xmlfile = load_parse_xml("/data/test/testoutfile.xml") #Generate array of annotation arrays for .csv file root_tree = xmlfile.getroot() gene_list=[] for child in root_tree: child.find("enrichment-results") new_array = [] array_of_arrays=[] for type in child.iter("enrichment-result"): count = 0 for annotation in type.iter("annotation"): array_of_arrays.append(new_array) new_array = [] new_array.append(type.attrib['type']) new_array.append(annotation.attrib['name']) new_array.append(annotation.attrib['id']) new_array.append(annotation.attrib['pvalue']) new_array.append(annotation.attrib['genes-in-query']) new_array.append(annotation.attrib['genes-in-term']) new_array.append(annotation.attrib['source']) for gene in annotation.iter("gene"): gene_list.append(gene.attrib['symbol']) new_array.append(gene_list) gene_list =[] count+= 1 print "Number of Annotations for ToppGene Feature - %s: " % type.attrib['type'] + str(count) print "Total number of significant gene sets from ToppGene: " + str(len(array_of_arrays)) #print array_of_arrays #Convert array of annotation arrays into .csv file (to be viewed as dataframe) import pyexcel data = array_of_arrays pyexcel.save_as(array = data, dest_file_name = '/data/test/results.csv') #Reading in the .csv ToppGene results df=pandas.read_csv('/data/test/results.csv', header=None) #Label dataframe 
columns df.columns=['ToppGene Feature','Annotation Name','ID','pValue','Genes-in-Query','Genes-in-Term','Source','Genes'] """ Explanation: Parse ToppGene results into Pandas data frame End of explanation """ #Dataframe for GeneOntologyMolecularFunction df.loc[df['ToppGene Feature'] == 'GeneOntologyMolecularFunction'] #Dataframe for GeneOntologyBiologicalProcess df.loc[df['ToppGene Feature'] == 'GeneOntologyBiologicalProcess'] #Dataframe for GeneOntologyCellularComponent df.loc[df['ToppGene Feature'] == 'GeneOntologyCellularComponent'] #Dataframe for Human Phenotype df.loc[df['ToppGene Feature'] == 'HumanPheno'] #Dataframe for Mouse Phenotype df.loc[df['ToppGene Feature'] == 'MousePheno'] #Dataframe for Domain df.loc[df['ToppGene Feature'] == 'Domain'] #Dataframe for Pathways df.loc[df['ToppGene Feature'] == 'Pathway'] #Dataframe for Pubmed df.loc[df['ToppGene Feature'] == 'Pubmed'] #Dataframe for Interactions df.loc[df['ToppGene Feature'] == 'Interaction'] #Dataframe for Cytobands df.loc[df['ToppGene Feature'] == 'Cytoband'] #Dataframe for Transcription Factor Binding Sites df.loc[df['ToppGene Feature'] == 'TranscriptionFactorBindingSite'] #Dataframe for Gene Family df.loc[df['ToppGene Feature'] == 'GeneFamily'] #Dataframe for Coexpression df.loc[df['ToppGene Feature'] == 'Coexpression'] #DataFrame for Coexpression Atlas df.loc[df['ToppGene Feature'] == 'CoexpressionAtlas'] #Dataframe for Computational df.loc[df['ToppGene Feature'] == 'Computational'] #Dataframe for MicroRNAs df.loc[df['ToppGene Feature'] == 'MicroRNA'] #Dataframe for Drugs df.loc[df['ToppGene Feature'] == 'Drug'] #Dataframe for Diseases df.loc[df['ToppGene Feature'] == 'Disease'] """ Explanation: Display the dataframe of each ToppGene feature End of explanation """ #Number of significant KEGG pathways total_KEGG_pathways = df.loc[df['Source'] == 'BioSystems: KEGG'] print "Number of significant KEGG pathways: " + str(len(total_KEGG_pathways.index)) df = df.loc[df['Source'] == 'BioSystems: KEGG'] 
df.to_csv('/data/test/keggpathways.csv', index=False) mapping_df = pandas.read_csv('/data/test/KEGGmap.csv') mapping_df = mapping_df.loc[mapping_df['Organism'] == 'Homo sapiens '] mapping_df.head(10) """ Explanation: Extract the KEGG pathway IDs from the ToppGene output (write to csv file) End of explanation """ #Create array of KEGG IDs that correspond to the significant pathways outputted by ToppGene KEGG_ID_array = [] for ID in df.ix[:,2]: x = int(ID) for index,BSID in enumerate(mapping_df.ix[:,0]): y = int(BSID) if x == y: KEGG_ID_array.append(mapping_df.get_value(index,1,takeable=True)) print KEGG_ID_array #Transform array into KEGG ID dataframe KEGG_IDs = pandas.DataFrame() KEGG_IDs['KEGG ID'] = KEGG_ID_array KEGG_IDs.to_csv('/data/test/keggidlist.csv', index=False) no_KEGG_ID = pandas.read_csv('/data/test/keggpathways.csv') KEGG_IDs = pandas.read_csv('/data/test/keggidlist.csv') #Append KEGG ID dataframe to dataframe containing the significant pathways outputted by ToppGene KEGG_ID_included = pd.concat([no_KEGG_ID, KEGG_IDs], axis = 1) KEGG_ID_included.to_csv('/data/test/KEGG_ID_included.csv', index=False) KEGG_ID_included """ Explanation: Create dataframe that includes the KEGG IDs that correspond to the significant pathways outputted by ToppGene End of explanation """ #Set working directory working_dir <- "/data/test" setwd(working_dir) date <- Sys.Date() #Set R options options(jupyter.plot_mimetypes = 'image/png') options(useHTTPS=FALSE) options(scipen=500) #Load R packages from CRAN and Bioconductor require(limma) require(edgeR) require(DESeq2) require(RColorBrewer) require(cluster) library(gplots) library(SPIA) library(graphite) library(PoiClaClu) library(ggplot2) library(pathview) library(KEGG.db) library(mygene) library(splitstackshape) library(reshape) library(hwriter) library(ReportingTools) library("EnrichmentBrowser") library(IRdisplay) library(repr) library(png) """ Explanation: Run Pathview to map and render user data on the pathway graphs 
outputted by ToppGene Switch to R kernel here End of explanation """ #Extract entrez ID and log2FC from the input DE genes #Read in differential expression results as a Pandas data frame to get differentially expressed gene list #Read in DE_genes_converted results (generated in jupyter notebook) genes <- read.csv("DE_genes_converted.csv")[,c('entrezgene', 'log2FoldChange')] #Remove NA values genes<-genes[complete.cases(genes),] head(genes,10) #Transform data frame into matrix (gene.data in Pathview only takes in a matrix formatted data) genedata<-matrix(c(genes[,2]),ncol=1,byrow=TRUE) rownames(genedata)<-c(genes[,1]) colnames(genedata)<-c("log2FoldChange") genedata <- as.matrix(genedata) head(genedata,10) """ Explanation: Create matrix-like structure to contain entrez ID and log2FC for gene.data input End of explanation """ #Read in pathways that you want to map to (from toppgene pathway results) #Store as a vector pathways <- read.csv("/data/test/keggidlist.csv") head(pathways, 12) pathways.vector<-as.vector(pathways$KEGG.ID) pathways.vector #Loop through all the pathways in pathways.vector #Generate Pathview pathways for each one (native KEGG graphs) i<-1 for (i in pathways.vector){ pv.out <- pathview(gene.data = genedata[, 1], pathway.id = i, species = "hsa", out.suffix = "toppgene_native_kegg_graph", kegg.native = T) #str(pv.out) #head(pv.out$plot.data.gene) } #Loop through all the pathways in pathways.vector #Generate Pathview pathways for each one (Graphviz layouts) i<-1 for (i in pathways.vector){ pv.out <- pathview(gene.data = genedata[, 1], pathway.id = i, species = "hsa", out.suffix = "toppgene_graphviz_layout", kegg.native = F) str(pv.out) head(pv.out$plot.data.gene) #head(pv.out$plot.data.gene) } """ Explanation: Create vector containing the KEGG IDs of all the significant target pathways End of explanation """ #Display native KEGG graphs import matplotlib.image as mpimg import matplotlib.pyplot as plt import pandas %matplotlib inline #for loop that 
iterates through the pathway images and displays them pathways = pandas.read_csv("/data/test/keggidlist.csv") pathways for i in pathways.ix[:,0]: image = i address = "/data/test/%s.toppgene_native_kegg_graph.png" % image img = mpimg.imread(address) plt.imshow(img) plt.gcf().set_size_inches(50,50) print i plt.show() """ Explanation: Display each of the signficant pathway colored overlay diagrams Switch back to py27 kernel here End of explanation """ #Import more python modules import sys #To access visJS_module and entrez_to_symbol module sys.path.append(os.getcwd().replace('/data/test', '/data/CCBB_internal/interns/Lilith/PathwayViz')) import visJS_module from ensembl_to_entrez import entrez_to_symbol import networkx as nx import matplotlib.pyplot as plt import pymongo from itertools import islice import requests import math import spectra from bioservices.kegg import KEGG import imp imp.reload(visJS_module) #Latex rendering of text in graphs import matplotlib as mpl mpl.rc('text', usetex = False) mpl.rc('font', family = 'serif') % matplotlib inline s = KEGG() #Lowest p value pathway #But you can change the first parameter in pathways.get_value to see different pathways in the pathways list! 
pathway = pathways.get_value(0,0, takeable=True) print pathway address = "/data/test/%s.xml" % pathway #Parse pathway's xml file and get the root of the xml file tree = ET.parse(address) root = tree.getroot() res = s.parse_kgml_pathway(pathway) print res['relations'] print res['entries'] G=nx.DiGraph() #Add nodes to networkx graph for entry in res['entries']: G.add_node(entry['id'], entry ) print len(G.nodes(data=True)) #Get symbol of each node temp_node_id_array = [] for node, data in G.nodes(data=True): if data['type'] == 'gene': if ' ' not in data['name']: G.node[node]['symbol'] = data['gene_names'].split(',', 1)[0] else: result = data['name'].split("hsa:") result = ''.join(result) result = result.split() for index, gene in enumerate(result): if index == 0: gene_symbol = str(entrez_to_symbol(gene)) else: gene_symbol = gene_symbol + ', ' + str(entrez_to_symbol(gene)) G.node[node]['symbol'] = gene_symbol elif data['type'] == 'compound': gene_symbol = s.parse(s.get(data['name']))['NAME'] G.node[node]['gene_names'] = ' '.join(gene_symbol) G.node[node]['symbol'] = gene_symbol[0].replace(';', '') print G.nodes(data=True) #Get x and y coordinates for each node seen_coord = set() coord_array = [] dupes_coord = [] for entry in root.findall('entry'): node_id = entry.attrib['id'] graphics = entry.find('graphics') if (graphics.attrib['x'], graphics.attrib['y']) in seen_coord: G.node[node_id]['x'] = (int(graphics.attrib['x']) + .1) * 2.5 G.node[node_id]['y'] = (int(graphics.attrib['y']) + .1) * 2.5 seen_coord.add((G.node[node_id]['x'], G.node[node_id]['y'])) print node_id else: seen_coord.add((graphics.attrib['x'], graphics.attrib['y'])) G.node[node_id]['x'] = int(graphics.attrib['x']) * 2.5 G.node[node_id]['y'] = int(graphics.attrib['y']) * 2.5 print dupes_coord print seen_coord #Handle undefined nodes comp_dict = dict() node_to_comp = dict() comp_array_total = [] #Array containing all component nodes for entry in root.findall('entry'): #Array to store components of 
undefined nodes component_array = [] if entry.attrib['name'] == 'undefined': node_id = entry.attrib['id'] #Find components for index, component in enumerate(entry.iter('component')): component_array.append(component.get('id')) #Check to see which elements are components comp_array_total.append(component.get('id')) node_to_comp[component.get('id')] = node_id #Store into node dictionary G.node[node_id]['component'] = component_array comp_dict[node_id] = component_array #Store gene names gene_name_array = [] for index, component_id in enumerate(component_array): if index == 0: gene_name_array.append(G.node[component_id]['gene_names']) else: gene_name_array.append('\n' + G.node[component_id]['gene_names']) G.node[node_id]['gene_names'] = gene_name_array #Store gene symbols gene_symbol_array = [] for index, component_id in enumerate(component_array): if index == 0: gene_symbol_array.append(G.node[component_id]['symbol']) else: gene_symbol_array.append('\n' + G.node[component_id]['symbol']) G.node[node_id]['symbol'] = gene_symbol_array print G.node edge_list = [] edge_pairs = [] #Add edges to networkx graph #Redirect edges to point to undefined nodes containing components in order to connect graph for edge in res['relations']: source = edge['entry1'] dest = edge['entry2'] if (edge['entry1'] in comp_array_total) == True: source = node_to_comp[edge['entry1']] if (edge['entry2'] in comp_array_total) == True: dest = node_to_comp[edge['entry2']] edge_list.append((source, dest, edge)) edge_pairs.append((source,dest)) #Check for duplicates if (source, dest) in G.edges(): name = [] value = [] link = [] name.append(G.edge[source][dest]['name']) value.append(G.edge[source][dest]['value']) link.append(G.edge[source][dest]['link']) name.append(edge['name']) value.append(edge['value']) link.append(edge['link']) G.edge[source][dest]['name'] = '\n'.join(name) G.edge[source][dest]['value'] = '\n'.join(value) G.edge[source][dest]['link'] = '\n'.join(link) else: G.add_edge(source, dest, 
edge) print G.edges(data=True) edge_to_name = dict() for edge in G.edges(): edge_to_name[edge] = G.edge[edge[0]][edge[1]]['name'] print edge_to_name #Set colors of edges edge_to_color = dict() for edge in G.edges(): if 'activation' in G.edge[edge[0]][edge[1]]['name']: edge_to_color[edge] = 'green' elif 'inhibition' in G.edge[edge[0]][edge[1]]['name']: edge_to_color[edge] = 'red' else: edge_to_color[edge] = 'blue' print edge_to_color #Remove component nodes from graph G.remove_nodes_from(comp_array_total) #Get nodes in graph nodes = G.nodes() numnodes = len(nodes) print numnodes print G.node #Get symbol of nodes node_to_symbol = dict() for node in G.node: if G.node[node]['type'] == 'map': node_to_symbol[node] = G.node[node]['gene_names'] else: if 'symbol' in G.node[node]: node_to_symbol[node] = G.node[node]['symbol'] elif 'gene_names'in G.node[node]: node_to_symbol[node] = G.node[node]['gene_names'] else: node_to_symbol[node] = G.node[node]['name'] #Get name of nodes node_to_gene = dict() for node in G.node: node_to_gene[node] = G.node[node]['gene_names'] #Get x coord of nodes node_to_x = dict() for node in G.node: node_to_x[node] = G.node[node]['x'] #Get y coord of nodes node_to_y = dict() for node in G.node: node_to_y[node] = G.node[node]['y'] #Log2FoldChange DE_genes_df = pandas.read_csv("/data/test/DE_genes_converted.csv") DE_genes_df.head(10) short_df = DE_genes_df[['_id', 'Ensembl', 'log2FoldChange']] short_df.head(10) short_df.to_dict('split') #Remove NA values gene_to_log2fold = dict() for entry in short_df.to_dict('split')['data']: if isinstance(entry[0], float): if math.isnan(entry[0]): gene_to_log2fold[entry[1]] = entry[2] else: gene_to_log2fold[entry[0]] = entry[2] else: gene_to_log2fold[entry[0]] = entry[2] print gene_to_log2fold #Create color scale with negative as green and positive as red my_scale = spectra.scale([ "green", "#CCC", "red" ]).domain([ -4, 0, 4 ]) id_to_log2fold = dict() for node in res['entries']: log2fold_array = [] if node['name'] == 
'undefined': print 'node is undefined' elif node['type'] == 'map': print 'node is a pathway' else: #print node['name'] result = node['name'].split("hsa:") result = ''.join(result) result = result.split() #print result for item in result: if item in gene_to_log2fold.keys(): log2fold_array.append(gene_to_log2fold[item]) if len(log2fold_array) > 0: id_to_log2fold[node['id']] = log2fold_array print id_to_log2fold #Color nodes based on log2fold data node_to_color = dict() for node in G.nodes(): if node in id_to_log2fold: node_to_color[node] = my_scale(id_to_log2fold[node][0]).hexcode else: node_to_color[node] = '#f1f1f1' print node_to_color #Get number of edges in graph edges = G.edges() numedges = len(edges) print numedges print G.edges(data=True) #Change directory os.chdir("/data/CCBB_internal/interns/Nicole/ToppGene") #Map to indices for source/target in edges node_map = dict(zip(nodes,range(numnodes))) #Dictionaries that hold per node and per edge attributes nodes_dict = [{"id":node_to_gene[n],"degree":G.degree(n),"color":node_to_color[n], "node_shape":"box", "node_size":10,'border_width':1, "id_num":node_to_symbol[n], "x":node_to_x[n], "y":node_to_y[n]} for n in nodes] edges_dict = [{"source":node_map[edges[i][0]], "target":node_map[edges[i][1]], "color":edge_to_color[edges[i]], "id":edge_to_name[edges[i]], "edge_label":'', "hidden":'false', "physics":'true'} for i in range(numedges)] #HTML file label for first graph (must manually increment later) time = 1700 #Make edges thicker #Create and display the graph here visJS_module.visjs_network(nodes_dict, edges_dict, time_stamp = time, node_label_field = "id_num", edge_width = 3, border_color = "black", edge_arrow_to = True, edge_font_size = 15, edge_font_align= "top", physics_enabled = False, graph_width = 1000, graph_height = 1000) """ Explanation: Weijun Luo and Cory Brouwer. Pathview: an R/Bioconductor package for pathway-based data integration and visualization. Bioinformatics, 29(14):1830-1831, 2013. 
doi: 10.1093/bioinformatics/btt285. Implement KEGG_pathway_vis Jupyter Notebook (by L. Huang) Only works for one pathway (first one) End of explanation """
GoogleCloudPlatform/tf-estimator-tutorials
08_Text_Analysis/06 - Part_2 - Text Classification - Hacker News - DNNClassifier with TF-Hub Sentence Embedding.ipynb
apache-2.0
import os class Params: pass # Set to run on GCP Params.GCP_PROJECT_ID = 'ksalama-gcp-playground' Params.REGION = 'europe-west1' Params.BUCKET = 'ksalama-gcs-cloudml' Params.PLATFORM = 'local' # local | GCP Params.DATA_DIR = 'data/news' if Params.PLATFORM == 'local' else 'gs://{}/data/news'.format(Params.BUCKET) Params.TRANSFORMED_DATA_DIR = os.path.join(Params.DATA_DIR, 'transformed') Params.TRANSFORMED_TRAIN_DATA_FILE_PREFIX = os.path.join(Params.TRANSFORMED_DATA_DIR, 'train') Params.TRANSFORMED_EVAL_DATA_FILE_PREFIX = os.path.join(Params.TRANSFORMED_DATA_DIR, 'eval') Params.TEMP_DIR = os.path.join(Params.DATA_DIR, 'tmp') Params.MODELS_DIR = 'models/news' if Params.PLATFORM == 'local' else 'gs://{}/models/news'.format(Params.BUCKET) Params.TRANSFORM_ARTEFACTS_DIR = os.path.join(Params.MODELS_DIR,'transform') Params.TRAIN = True Params.RESUME_TRAINING = False Params.EAGER = False if Params.EAGER: tf.enable_eager_execution() """ Explanation: Text Classification using TensorFlow and Google Cloud - Part 2 This bigquery-public-data:hacker_news contains all stories and comments from Hacker News from its launch in 2006. Each story contains a story id, url, the title of the story, tthe author that made the post, when it was written, and the number of points the story received. The objective is, given the title of the story, we want to build an ML model that can predict the source of this story. TF DNNClassifier with TF.Hub Sentence Embedding This notebook illustrates how to build a TF premade estimator, namely DNNClassifier, while the input text will be repesented as sentence embedding, using a tf.hub text embedding module. The model will be using the transformed data produced in part one. Note that, the tf.hub text embedding module will make use of only the the raw text feature (title). 
The overall steps are as follows: Define the metadata Define data input function Create feature columns (use the tf.hub text embedding module) Create the premade DNNClassifier estimator Setup experiement Hyper-parameters & RunConfig Serving function (for exported model) TrainSpec & EvalSpec Run experiement Evalute the model Use SavedModel for prediction Setting Global Parameters End of explanation """ import tensorflow as tf from tensorflow import data from tensorflow.contrib.learn.python.learn.utils import input_fn_utils from tensorflow_transform.beam.tft_beam_io import transform_fn_io from tensorflow_transform.tf_metadata import metadata_io from tensorflow_transform.tf_metadata import dataset_schema from tensorflow_transform.tf_metadata import dataset_metadata from tensorflow_transform.saved import saved_transform_io print tf.__version__ """ Explanation: Importing libraries End of explanation """ RAW_HEADER = 'key,title,source'.split(',') RAW_DEFAULTS = [['NA'],['NA'],['NA']] TARGET_FEATURE_NAME = 'source' TARGET_LABELS = ['github', 'nytimes', 'techcrunch'] TEXT_FEATURE_NAME = 'title' KEY_COLUMN = 'key' VOCAB_SIZE = 20000 TRAIN_SIZE = 73124 EVAL_SIZE = 23079 DELIMITERS = '.,!?() ' raw_metadata = dataset_metadata.DatasetMetadata(dataset_schema.Schema({ KEY_COLUMN: dataset_schema.ColumnSchema( tf.string, [], dataset_schema.FixedColumnRepresentation()), TEXT_FEATURE_NAME: dataset_schema.ColumnSchema( tf.string, [], dataset_schema.FixedColumnRepresentation()), TARGET_FEATURE_NAME: dataset_schema.ColumnSchema( tf.string, [], dataset_schema.FixedColumnRepresentation()), })) transformed_metadata = metadata_io.read_metadata( os.path.join(Params.TRANSFORM_ARTEFACTS_DIR,"transformed_metadata")) raw_feature_spec = raw_metadata.schema.as_feature_spec() transformed_feature_spec = transformed_metadata.schema.as_feature_spec() print transformed_feature_spec """ Explanation: 1. 
Define Metadata End of explanation """ def parse_tf_example(tf_example): parsed_features = tf.parse_single_example(serialized=tf_example, features=transformed_feature_spec) target = parsed_features.pop(TARGET_FEATURE_NAME) return parsed_features, target def generate_tfrecords_input_fn(files_pattern, mode=tf.estimator.ModeKeys.EVAL, num_epochs=1, batch_size=200): def _input_fn(): file_names = data.Dataset.list_files(files_pattern) if Params.EAGER: print file_names dataset = data.TFRecordDataset(file_names ) dataset = dataset.apply( tf.contrib.data.shuffle_and_repeat(count=num_epochs, buffer_size=batch_size*2) ) dataset = dataset.apply( tf.contrib.data.map_and_batch(parse_tf_example, batch_size=batch_size, num_parallel_batches=2) ) datset = dataset.prefetch(batch_size) if Params.EAGER: return dataset iterator = dataset.make_one_shot_iterator() features, target = iterator.get_next() return features, target return _input_fn """ Explanation: 2. Define Input Function End of explanation """ import tensorflow_hub as hub print hub.__version__ def create_feature_columns(hparams): title_embeding_column = hub.text_embedding_column( "title", "https://tfhub.dev/google/universal-sentence-encoder/1", trainable=hparams.trainable_embedding) feature_columns = [title_embeding_column] print "feature columns: \n {}".format(feature_columns) print "" return feature_columns """ Explanation: 3. Create feature columns End of explanation """ def create_estimator(hparams, run_config): feature_columns = create_feature_columns(hparams) optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate) estimator = tf.estimator.DNNClassifier( feature_columns=feature_columns, n_classes =len(TARGET_LABELS), label_vocabulary=TARGET_LABELS, hidden_units=hparams.hidden_units, optimizer=optimizer, config=run_config ) return estimator """ Explanation: 4. 
Create a model using a premade DNNClassifer End of explanation """ NUM_EPOCHS = 10 BATCH_SIZE = 1000 TOTAL_STEPS = (TRAIN_SIZE/BATCH_SIZE)*NUM_EPOCHS EVAL_EVERY_SEC = 60 hparams = tf.contrib.training.HParams( num_epochs = NUM_EPOCHS, batch_size = BATCH_SIZE, trainable_embedding = False, learning_rate = 0.01, hidden_units=[128, 64], max_steps = TOTAL_STEPS, ) MODEL_NAME = 'dnn_estimator_hub' model_dir = os.path.join(Params.MODELS_DIR, MODEL_NAME) run_config = tf.estimator.RunConfig( tf_random_seed=19830610, log_step_count_steps=1000, save_checkpoints_secs=EVAL_EVERY_SEC, keep_checkpoint_max=1, model_dir=model_dir ) print(hparams) print("") print("Model Directory:", run_config.model_dir) print("Dataset Size:", TRAIN_SIZE) print("Batch Size:", BATCH_SIZE) print("Steps per Epoch:",TRAIN_SIZE/BATCH_SIZE) print("Total Steps:", TOTAL_STEPS) """ Explanation: 5. Setup Experiment 5.1 HParams and RunConfig End of explanation """ def generate_serving_input_fn(): def _serving_fn(): receiver_tensor = { 'title': tf.placeholder(dtype=tf.string, shape=[None]) } return tf.estimator.export.ServingInputReceiver( receiver_tensor, receiver_tensor) return _serving_fn """ Explanation: 5.2 Serving function End of explanation """ train_spec = tf.estimator.TrainSpec( input_fn = generate_tfrecords_input_fn( Params.TRANSFORMED_TRAIN_DATA_FILE_PREFIX+"*", mode = tf.estimator.ModeKeys.TRAIN, num_epochs=hparams.num_epochs, batch_size=hparams.batch_size ), max_steps=hparams.max_steps, hooks=None ) eval_spec = tf.estimator.EvalSpec( input_fn = generate_tfrecords_input_fn( Params.TRANSFORMED_EVAL_DATA_FILE_PREFIX+"*", mode=tf.estimator.ModeKeys.EVAL, num_epochs=1, batch_size=hparams.batch_size ), exporters=[tf.estimator.LatestExporter( name="estimate", # the name of the folder in which the model will be exported to under export serving_input_receiver_fn=generate_serving_input_fn(), exports_to_keep=1, as_text=False)], steps=None, throttle_secs=EVAL_EVERY_SEC ) """ Explanation: 5.3 TrainSpec & 
EvalSpec End of explanation """ from datetime import datetime import shutil if Params.TRAIN: if not Params.RESUME_TRAINING: print("Removing previous training artefacts...") shutil.rmtree(model_dir, ignore_errors=True) else: print("Resuming training...") tf.logging.set_verbosity(tf.logging.INFO) time_start = datetime.utcnow() print("Experiment started at {}".format(time_start.strftime("%H:%M:%S"))) print(".......................................") estimator = create_estimator(hparams, run_config) tf.estimator.train_and_evaluate( estimator=estimator, train_spec=train_spec, eval_spec=eval_spec ) time_end = datetime.utcnow() print(".......................................") print("Experiment finished at {}".format(time_end.strftime("%H:%M:%S"))) print("") time_elapsed = time_end - time_start print("Experiment elapsed time: {} seconds".format(time_elapsed.total_seconds())) else: print "Training was skipped!" """ Explanation: 6. Run experiment End of explanation """ tf.logging.set_verbosity(tf.logging.ERROR) estimator = create_estimator(hparams, run_config) train_metrics = estimator.evaluate( input_fn = generate_tfrecords_input_fn( files_pattern= Params.TRANSFORMED_TRAIN_DATA_FILE_PREFIX+"*", mode= tf.estimator.ModeKeys.EVAL, batch_size= TRAIN_SIZE), steps=1 ) print("############################################################################################") print("# Train Measures: {}".format(train_metrics)) print("############################################################################################") eval_metrics = estimator.evaluate( input_fn=generate_tfrecords_input_fn( files_pattern= Params.TRANSFORMED_EVAL_DATA_FILE_PREFIX+"*", mode= tf.estimator.ModeKeys.EVAL, batch_size= EVAL_SIZE), steps=1 ) print("") print("############################################################################################") print("# Eval Measures: {}".format(eval_metrics)) print("############################################################################################") """ 
Explanation: 7. Evaluate the model End of explanation """ import os export_dir = model_dir +"/export/estimate/" saved_model_dir = os.path.join(export_dir, os.listdir(export_dir)[0]) print(saved_model_dir) print("") predictor_fn = tf.contrib.predictor.from_saved_model( export_dir = saved_model_dir, signature_def_key="predict" ) output = predictor_fn( { 'title':[ 'Microsoft and Google are joining forces for a new AI framework', 'A new version of Python is mind blowing', 'EU is investigating new data privacy policies' ] } ) print(output) """ Explanation: 8. Use Saved Model for Predictions End of explanation """
mne-tools/mne-tools.github.io
dev/_downloads/7ca3f34c286b629113cbb522edf26a21/75_cluster_ftest_spatiotemporal.ipynb
bsd-3-clause
# Authors: Denis Engemann <denis.engemann@gmail.com> # Jona Sassenhagen <jona.sassenhagen@gmail.com> # Alex Rockhill <aprockhill@mailbox.org> # Stefan Appelhoff <stefan.appelhoff@mailbox.org> # # License: BSD-3-Clause import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable import scipy.stats import mne from mne.stats import spatio_temporal_cluster_test, combine_adjacency from mne.datasets import sample from mne.channels import find_ch_adjacency from mne.viz import plot_compare_evokeds from mne.time_frequency import tfr_morlet """ Explanation: Spatiotemporal permutation F-test on full sensor data Tests for differential evoked responses in at least one condition using a permutation clustering test. The FieldTrip neighbor templates will be used to determine the adjacency between sensors. This serves as a spatial prior to the clustering. Spatiotemporal clusters will then be visualized using custom matplotlib code. Here, the unit of observation is epochs from a specific study subject. However, the same logic applies when the unit observation is a number of study subject each of whom contribute their own averaged data (i.e., an average of their epochs). This would then be considered an analysis at the "2nd level". See the FieldTrip tutorial for a caveat regarding the possible interpretation of "significant" clusters. 
For more information on cluster-based permutation testing in MNE-Python, see also: tut-cluster-one-samp-tfr End of explanation """ data_path = sample.data_path() meg_path = data_path / 'MEG' / 'sample' raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif' event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif' event_id = {'Aud/L': 1, 'Aud/R': 2, 'Vis/L': 3, 'Vis/R': 4} tmin = -0.2 tmax = 0.5 # Setup for reading the raw data raw = mne.io.read_raw_fif(raw_fname, preload=True) raw.filter(1, 30) events = mne.read_events(event_fname) """ Explanation: Set parameters End of explanation """ picks = mne.pick_types(raw.info, meg='mag', eog=True) reject = dict(mag=4e-12, eog=150e-6) epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=None, reject=reject, preload=True) epochs.drop_channels(['EOG 061']) epochs.equalize_event_counts(event_id) # Obtain the data as a 3D matrix and transpose it such that # the dimensions are as expected for the cluster permutation test: # n_epochs × n_times × n_channels X = [epochs[event_name].get_data() for event_name in event_id] X = [np.transpose(x, (0, 2, 1)) for x in X] """ Explanation: Read epochs for the channel of interest End of explanation """ adjacency, ch_names = find_ch_adjacency(epochs.info, ch_type='mag') print(type(adjacency)) # it's a sparse matrix! mne.viz.plot_ch_adjacency(epochs.info, adjacency, ch_names) """ Explanation: Find the FieldTrip neighbor definition to setup sensor adjacency End of explanation """ # We are running an F test, so we look at the upper tail # see also: https://stats.stackexchange.com/a/73993 tail = 1 # We want to set a critical test statistic (here: F), to determine when # clusters are being formed. Using Scipy's percent point function of the F # distribution, we can conveniently select a threshold that corresponds to # some alpha level that we arbitrarily pick. 
alpha_cluster_forming = 0.001 # For an F test we need the degrees of freedom for the numerator # (number of conditions - 1) and the denominator (number of observations # - number of conditions): n_conditions = len(event_id) n_observations = len(X[0]) dfn = n_conditions - 1 dfd = n_observations - n_conditions # Note: we calculate 1 - alpha_cluster_forming to get the critical value # on the right tail f_thresh = scipy.stats.f.ppf(1 - alpha_cluster_forming, dfn=dfn, dfd=dfd) # run the cluster based permutation analysis cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000, threshold=f_thresh, tail=tail, n_jobs=None, buffer_size=None, adjacency=adjacency) F_obs, clusters, p_values, _ = cluster_stats """ Explanation: Compute permutation statistic How does it work? We use clustering to "bind" together features which are similar. Our features are the magnetic fields measured over our sensor array at different times. This reduces the multiple comparison problem. To compute the actual test-statistic, we first sum all F-values in all clusters. We end up with one statistic for each cluster. Then we generate a distribution from the data by shuffling our conditions between our samples and recomputing our clusters and the test statistics. We test for the significance of a given cluster by computing the probability of observing a cluster of that size :footcite:MarisOostenveld2007,Sassenhagen2019. End of explanation """ # We subselect clusters that we consider significant at an arbitrarily # picked alpha level: "p_accept". # NOTE: remember the caveats with respect to "significant" clusters that # we mentioned in the introduction of this tutorial! 
p_accept = 0.01 good_cluster_inds = np.where(p_values < p_accept)[0] # configure variables for visualization colors = {"Aud": "crimson", "Vis": 'steelblue'} linestyles = {"L": '-', "R": '--'} # organize data for plotting evokeds = {cond: epochs[cond].average() for cond in event_id} # loop over clusters for i_clu, clu_idx in enumerate(good_cluster_inds): # unpack cluster information, get unique indices time_inds, space_inds = np.squeeze(clusters[clu_idx]) ch_inds = np.unique(space_inds) time_inds = np.unique(time_inds) # get topography for F stat f_map = F_obs[time_inds, ...].mean(axis=0) # get signals at the sensors contributing to the cluster sig_times = epochs.times[time_inds] # create spatial mask mask = np.zeros((f_map.shape[0], 1), dtype=bool) mask[ch_inds, :] = True # initialize figure fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3)) # plot average test statistic and mark significant sensors f_evoked = mne.EvokedArray(f_map[:, np.newaxis], epochs.info, tmin=0) f_evoked.plot_topomap(times=0, mask=mask, axes=ax_topo, cmap='Reds', vmin=np.min, vmax=np.max, show=False, colorbar=False, mask_params=dict(markersize=10)) image = ax_topo.images[0] # remove the title that would otherwise say "0.000 s" ax_topo.set_title("") # create additional axes (for ERF and colorbar) divider = make_axes_locatable(ax_topo) # add axes for colorbar ax_colorbar = divider.append_axes('right', size='5%', pad=0.05) plt.colorbar(image, cax=ax_colorbar) ax_topo.set_xlabel( 'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]])) # add new axis for time courses and plot time courses ax_signals = divider.append_axes('right', size='300%', pad=1.2) title = 'Cluster #{0}, {1} sensor'.format(i_clu + 1, len(ch_inds)) if len(ch_inds) > 1: title += "s (mean)" plot_compare_evokeds(evokeds, title=title, picks=ch_inds, axes=ax_signals, colors=colors, linestyles=linestyles, show=False, split_legend=True, truncate_yaxis='auto') # plot temporal cluster extent ymin, ymax = ax_signals.get_ylim() 
ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1], color='orange', alpha=0.3) # clean up viz mne.viz.tight_layout(fig=fig) fig.subplots_adjust(bottom=.05) plt.show() """ Explanation: <div class="alert alert-info"><h4>Note</h4><p>Note how we only specified an adjacency for sensors! However, because we used :func:`mne.stats.spatio_temporal_cluster_test`, an adjacency for time points was automatically taken into account. That is, at time point N, the time points N - 1 and N + 1 were considered as adjacent (this is also called "lattice adjacency"). This is only possbile because we ran the analysis on 2D data (times × channels) per observation ... for 3D data per observation (e.g., times × frequencies × channels), we will need to use :func:`mne.stats.combine_adjacency`, as shown further below.</p></div> Note also that the same functions work with source estimates. The only differences are the origin of the data, the size, and the adjacency definition. It can be used for single trials or for groups of subjects. Visualize clusters End of explanation """ decim = 4 freqs = np.arange(7, 30, 3) # define frequencies of interest n_cycles = freqs / freqs[0] epochs_power = list() for condition in [epochs[k] for k in ('Aud/L', 'Vis/L')]: this_tfr = tfr_morlet(condition, freqs, n_cycles=n_cycles, decim=decim, average=False, return_itc=False) this_tfr.apply_baseline(mode='ratio', baseline=(None, 0)) epochs_power.append(this_tfr.data) # transpose again to (epochs, frequencies, times, channels) X = [np.transpose(x, (0, 2, 3, 1)) for x in epochs_power] """ Explanation: Permutation statistic for time-frequencies Let's do the same thing with the time-frequency decomposition of the data (see tut-sensors-time-freq for a tutorial and ex-tfr-comparison for a comparison of time-frequency methods) to show how cluster permutations can be done on higher-dimensional data. 
End of explanation """ # our data at each observation is of shape frequencies × times × channels tfr_adjacency = combine_adjacency( len(freqs), len(this_tfr.times), adjacency) """ Explanation: Remember the note on the adjacency matrix from above: For 3D data, as here, we must use :func:mne.stats.combine_adjacency to extend the sensor-based adjacency to incorporate the time-frequency plane as well. Here, the integer inputs are converted into a lattice and combined with the sensor adjacency matrix so that data at similar times and with similar frequencies and at close sensor locations are clustered together. End of explanation """ # This time we don't calculate a threshold based on the F distribution. # We might as well select an arbitrary threshold for cluster forming tfr_threshold = 15.0 # run cluster based permutation analysis cluster_stats = spatio_temporal_cluster_test( X, n_permutations=1000, threshold=tfr_threshold, tail=1, n_jobs=None, buffer_size=None, adjacency=tfr_adjacency) """ Explanation: Now we can run the cluster permutation test, but first we have to set a threshold. This example decimates in time and uses few frequencies so we need to increase the threshold from the default value in order to have differentiated clusters (i.e., so that our algorithm doesn't just find one large cluster). For a more principled method of setting this parameter, threshold-free cluster enhancement may be used. See disc-stats for a discussion. 
End of explanation """ F_obs, clusters, p_values, _ = cluster_stats good_cluster_inds = np.where(p_values < p_accept)[0] for i_clu, clu_idx in enumerate(good_cluster_inds): # unpack cluster information, get unique indices freq_inds, time_inds, space_inds = clusters[clu_idx] ch_inds = np.unique(space_inds) time_inds = np.unique(time_inds) freq_inds = np.unique(freq_inds) # get topography for F stat f_map = F_obs[freq_inds].mean(axis=0) f_map = f_map[time_inds].mean(axis=0) # get signals at the sensors contributing to the cluster sig_times = epochs.times[time_inds] # initialize figure fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3)) # create spatial mask mask = np.zeros((f_map.shape[0], 1), dtype=bool) mask[ch_inds, :] = True # plot average test statistic and mark significant sensors f_evoked = mne.EvokedArray(f_map[:, np.newaxis], epochs.info, tmin=0) f_evoked.plot_topomap(times=0, mask=mask, axes=ax_topo, cmap='Reds', vmin=np.min, vmax=np.max, show=False, colorbar=False, mask_params=dict(markersize=10)) image = ax_topo.images[0] # create additional axes (for ERF and colorbar) divider = make_axes_locatable(ax_topo) # add axes for colorbar ax_colorbar = divider.append_axes('right', size='5%', pad=0.05) plt.colorbar(image, cax=ax_colorbar) ax_topo.set_xlabel( 'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]])) # remove the title that would otherwise say "0.000 s" ax_topo.set_title("") # add new axis for spectrogram ax_spec = divider.append_axes('right', size='300%', pad=1.2) title = 'Cluster #{0}, {1} spectrogram'.format(i_clu + 1, len(ch_inds)) if len(ch_inds) > 1: title += " (max over channels)" F_obs_plot = F_obs[..., ch_inds].max(axis=-1) F_obs_plot_sig = np.zeros(F_obs_plot.shape) * np.nan F_obs_plot_sig[tuple(np.meshgrid(freq_inds, time_inds))] = \ F_obs_plot[tuple(np.meshgrid(freq_inds, time_inds))] for f_image, cmap in zip([F_obs_plot, F_obs_plot_sig], ['gray', 'autumn']): c = ax_spec.imshow(f_image, cmap=cmap, aspect='auto', origin='lower', 
extent=[epochs.times[0], epochs.times[-1], freqs[0], freqs[-1]]) ax_spec.set_xlabel('Time (ms)') ax_spec.set_ylabel('Frequency (Hz)') ax_spec.set_title(title) # add another colorbar ax_colorbar2 = divider.append_axes('right', size='5%', pad=0.05) plt.colorbar(c, cax=ax_colorbar2) ax_colorbar2.set_ylabel('F-stat') # clean up viz mne.viz.tight_layout(fig=fig) fig.subplots_adjust(bottom=.05) plt.show() """ Explanation: Finally, we can plot our results. It is difficult to visualize clusters in time-frequency-sensor space; plotting time-frequency spectrograms and plotting topomaps display time-frequency and sensor space respectively but they are difficult to combine. We will plot topomaps with the clustered sensors colored in white adjacent to spectrograms in order to provide a visualization of the results. This is a dimensionally limited view, however. Each sensor has its own significant time-frequencies, but, in order to display a single spectrogram, all the time-frequencies that are significant for any sensor in the cluster are plotted as significant. This is a difficulty inherent to visualizing high-dimensional data and should be taken into consideration when interpreting results. End of explanation """
thanhleviet/weed
1-Acquire.ipynb
mit
# Load the libraries import pandas as pd import numpy as np # Load the dataset df = pd.read_csv("data/Weed_Price.csv") # Shape of the dateset - rows & columns df.shape # Check for type of each variable df.dtypes # Lets load this again with date as date type df = pd.read_csv("data/Weed_Price.csv", parse_dates=[-1]) # Now check for type for each row df.dtypes # Get the names of all columns df.columns # Get the index of all rows df.index """ Explanation: 1. Acquire the Data "Data is the new oil" Ways to acquire data (typical data source) Download from an internal system Obtained from client, or other 3rd party Extracted from a web-based API Scraped from a website Extracted from a PDF file Gathered manually and recorded Data Formats - Flat files (e.g. csv) - Excel files - Database (e.g. MySQL) - JSON - HDFS (Hadoop) Two Datasets - Price of Weed in US - Demographic data by US State 1.1 - Crowdsource the Price of Weed dataset The Price of Weed website - http://www.priceofweed.com/ Crowdsources the price paid by people on the street to get weed. Self Reported. - Location is auto detected or can be choosen - Quality is classified in three categories - High - Medium - Low - Price by weight - an ounce - a half ounce - a quarter - an eighth - 10 grams - 5 grams - 1 gram - Strain (though not showed in the dataset) Reported at individual transaction level Here is a sample data set from United States - http://www.priceofweed.com/prices/United-States.html See note - Averages are corrected for outliers based on standard deviation from the mean. 1.2 Scrape the data Frank Bi from The Verge wrote a script to scrape the data daily. The daily prices are available on github at https://github.com/frankbi/price-of-weed Here is sample data from one day - 23rd July 2015 - https://github.com/frankbi/price-of-weed/blob/master/data/weedprices23072015.csv 1.3 Combine the data All the csv files for each day were combined into one large csv. Done by YHAT. 
http://blog.yhathq.com/posts/7-funny-datasets.html 1.4 Key Questions / Assumptions Data is an abstraction of the reality. What assumptions have been in this entire data collections process? Are we aware of the assumptions in this process? How to ensure that the data is accurate or representative for the question we are trying to answer? 1.5 Loading the Data End of explanation """ # Can we see some sample rows - the top 5 rows df.head() # Can we see some sample rows - the bottom 5 rows df.tail() # Get specific rows df[20:25] # Can we access a specific columns df["State"] # Using the dot notation df.State # Selecting specific column and rows df[0:5]["State"] # Works both ways df["State"][0:5] #Getting unique values of State pd.unique(df['State']) """ Explanation: 1.6 Viewing the Data End of explanation """ df.index df.loc[0] df.iloc[0,0] df.ix[0,0] """ Explanation: 1.7 Slicing columns using pandas End of explanation """ #Find weighted average price with respective weights of 0.6, 0.4 for HighQ and MedQ #Python approach. Loop over all rows. #For each row, multiply the respective columns by those weights. #Add the output to an array #It is easy to convert pandas series to numpy array. highq_np = np.array(df.HighQ) medq_np = np.array(df.MedQ) #Standard pythonic code def find_weighted_price(): global weighted_price weighted_price = [] for i in range(df.shape[0]): weighted_price.append(0.6*highq_np[i]*0.4*highq_np[i]) #print the weighted price find_weighted_price() print weighted_price """ Explanation: Exercise 1) Load the Demographics_State.csv dataset 2) Show the five first rows of the dataset 3) Select the column with the State name in the data frame 4) Get help 5) Change index to date 6) Get all the data for 2nd January 2014 Thinking in Vectors Difference between loops and vectors End of explanation """ #Vectorized Code weighted_price_vec = 0.6*highq_np + 0.4*medq_np """ Explanation: Exercise: Find the running time of the above program End of explanation """
GoogleCloudPlatform/rad-lab
modules/data_science/scripts/build/notebooks/Exploring_gnomad_on_BigQuery.ipynb
apache-2.0
# Import libraries import numpy as np import os """ Explanation: Sample Notebook for exploring gnomAD in BigQuery This notebook contains sample queries to explore the gnomAD dataset which is hosted through the Google Cloud Public Datasets Program. Setup If you just want to look at sample results, you can scroll down to see the output of the existing queries without having to run anything. If you would like to re-run the queries or make changes, you will need to set the Google Cloud project in which to run the analysis. This will be your Radlab Analytics project ID End of explanation """ # Replace project_id with your Google Cloud Project ID. os.environ["GOOGLE_CLOUD_PROJECT"]='radlab-ds-analytics-xxxx' """ Explanation: Set Google Cloud Project To run queries in BigQuery, you need to specify the Google Cloud project that will be used. The queries below report the number of bytes billed by each query. The first 1 TB of query data processed in a project per month is free. For more details, see the BigQuery Pricing page. To find your Project ID, go to the Project Settings page in the Google Cloud Console. You can select the project you want using the drop-down menu at the top of the page. This will be your Radlab Project ID. 
Update the below environment variable with your project ID End of explanation """ import ipywidgets as widgets print("Variables for Region (Type 1) Queries") gnomad_version_widget_region = widgets.Dropdown( options=['v2_1_1_exomes', 'v2_1_1_genomes', 'v3_genomes'], value='v3_genomes', description='gnomAD version:', disabled=False, style={'description_width': 'initial'} ) display(gnomad_version_widget_region) chromosome_widget_region = widgets.Dropdown( options=['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15', 'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22', 'chrX', 'chrY'], value='chr17', description='Chromosome:', disabled=False, style={'description_width': 'initial'} ) display(chromosome_widget_region) gene_symbol_widget_region= widgets.Text( value='BRCA1', placeholder='gene_symbol', description='Gene Symbol:', disabled=False, style={'description_width': 'initial'} ) display(gene_symbol_widget_region) # Set the variables for the rest of the Type 1 queries based on the values above. gnomad_version_region=gnomad_version_widget_region.value chromosome_region=chromosome_widget_region.value gene_symbol_region=gene_symbol_widget_region.value print('Running Region (Type 1) queries on gnomAD version: {}, chromosome: {}, gene symbol: {}'.format( gnomad_version_region, chromosome_region, gene_symbol_region )) if gnomad_version_region.startswith('v3'): # Variant type (snv, indel, multi-snv, multi-indel, or mixed) is stored under difference columns in V2 and V3 variant_type_col = 'variant_type' extra_columns = '' else: variant_type_col = 'alternate_bases. 
allele_type' # These vep columns only exist in V2 extra_columns = 'vep.STRAND AS STRAND, vep.Protein_position AS Protein_pos,' from google.cloud import bigquery client = bigquery.Client() def run_query(query): query_job = client.query(query) result = query_job.to_dataframe(progress_bar_type='tqdm_notebook') gb_processed = (query_job.total_bytes_billed / 1024 ** 3) print('This query processed {} GB of data which is {}% of your 1 TB monthly free quota.'.format(gb_processed, round(gb_processed / 1024 * 100, 4))) return result query_template = """ SELECT MIN(start_position) AS X, MAX(end_position) AS Y FROM `bigquery-public-data.gnomAD.{GNOMAD_VER}__{CHROM}` AS main_table WHERE EXISTS (SELECT 1 FROM UNNEST(main_table.alternate_bases) AS alternate_bases WHERE EXISTS (SELECT 1 from alternate_bases.vep WHERE SYMBOL = '{GENE}')) """ query = query_template.format(GNOMAD_VER=gnomad_version_region, CHROM=chromosome_region, GENE=gene_symbol_region) limits = run_query(query) print(limits) x = limits.at[0, 'X'] y = limits.at[0, 'Y'] """ Explanation: gnomAD Queries Type1: Explore a particular genomic region This category include queries that extract information from a region of the genome, for example a gene. Because gnomAD BigQuery tables utilize integer range partitioning they are optimized for this type of query. The main requirement to use this feature is to limit queries to a particular region by adding these conditions to the WHERE clause: WHERE start_position &gt;= X AND start_position &lt;= Y Where [X, Y] is the region of interest. You can find values of X and Y by refering to an external databses. 
For example the following table sumarizes the start and end positions for 4 genes on chromosome 17 extracted from an external resource: | Gene | X | Y | Source | |:-: |- |- |- | | BRCA1 | 43044295 | 43125364 | link | | COL1A1 | 50184096 | 50201649 | link | | TP53 | 31094927 | 31377677 | link | | NF1 | 56593699 | 56595611 | link | Alternatively you could use the following query that extract the same infomration directly from gnomAD tables. In the following example we are using BRCA1 on chr17 as an example. You can enter your gene of interest and chromosome to modify all the following queries. If your query returns NaN this might be because you specified the wrong chromosome, which will query the wrong table. Also you can choose which version of the gnomAD dataset you'd like to use for all the queries: * v2_1_1_exomes * v2_1_1_genomes * v3_genomes End of explanation """ # NOTE: For v2_1_1 the "variant_type" column must be replaced with "alternate_bases.allele_type AS variant_type" query_template = """ SELECT COUNT(1) AS num, variant_type FROM ( SELECT DISTINCT start_position, reference_bases, alternate_bases.alt, {VAR_TYPE_COL} AS variant_type, FROM `bigquery-public-data.gnomAD.{GNOMAD_VER}__{CHROM}` AS main_table, main_table.alternate_bases AS alternate_bases WHERE start_position >= {X} AND start_position <= {Y} ) GROUP BY 2 ORDER BY 1 DESC """ query = query_template.format(GNOMAD_VER=gnomad_version_region, CHROM=chromosome_region, VAR_TYPE_COL=variant_type_col, X=x, Y=y) summary = run_query(query) summary.head() """ Explanation: After you found the [X, Y] range for your gene of interst you can run Type1 queries efficiently. 
Here are a couple of examples: Query 1.1a - Variant Type (BigQuery) Find the number of INDELs and SNVs in the region of interest using BigQuery End of explanation """ # NOTE: For v2_1_1 the "variant_type" column must be replaced with "alternate_bases.allele_type AS variant_type" query_template = """ SELECT DISTINCT start_position, reference_bases, alternate_bases.alt, {VAR_TYPE_COL} AS variant_type, FROM `bigquery-public-data.gnomAD.{GNOMAD_VER}__{CHROM}` AS main_table, main_table.alternate_bases AS alternate_bases WHERE start_position >= {X} AND start_position <= {Y} ORDER BY 1,2 """ query = query_template.format(GNOMAD_VER=gnomad_version_region, CHROM=chromosome_region, VAR_TYPE_COL=variant_type_col, X=x, Y=y) summary_dataframe = run_query(query) # Count the number of each variant type in Python instead of in BigQuery print('Number of variants by type:') for v in summary_dataframe.variant_type.unique(): print('{}: {}'.format(v, np.count_nonzero(summary_dataframe['variant_type'] == v))) """ Explanation: Query 1.1b - Variant Type (Python) You can also find the number of INDELs and SNVs in the region of interest by doing the aggregation and count in Python using the dataframe. 
End of explanation """ # NOTE: For v2_1_1 the "variant_type" column must be replaced with "alternate_bases.allele_type AS variant_type" query_template = """ SELECT reference_name AS CHROM, start_position AS POS, names AS ID, reference_bases AS REF, alternate_bases.alt AS ALT, AN, AN_male, AN_female, alternate_bases.AC AS AC, alternate_bases.AC_male AS AC_male, alternate_bases.AC_female AS AC_female, alternate_bases.nhomalt AS nhomalt, alternate_bases.nhomalt_male AS nhomalt_male, alternate_bases.nhomalt_female AS nhomalt_female, FROM `bigquery-public-data.gnomAD.{GNOMAD_VER}__{CHROM}` AS main_table, main_table.alternate_bases AS alternate_bases WHERE start_position >= {X} AND start_position <= {Y} ORDER BY 1,2 """ query = query_template.format(GNOMAD_VER=gnomad_version_region, CHROM=chromosome_region, X=x, Y=y) stats_sex = run_query(query) stats_sex.head() """ Explanation: Instead of aggregating the results in BigQuery to count the number of each variant type, we could return all rows and process them here. The following query adds a few more columns to the previous query. Query 1.2 - Allele Count by Sex A query to retrieve all variants in the region of interest along with AN and AC values split by sex. AN: Total number of alleles in samples AC: Alternate allele count for samples nhomalt: The number of individuals that are called homozygous for the alternate allele. End of explanation """ stats_sex_filtered_ac=stats_sex.loc[stats_sex['AC'] > 10] stats_sex_filtered_ac.head() """ Explanation: We can then perform further analysis on the dataframe such as filtering out variants with a low allele count (AC). End of explanation """ stats_sex_no_male=stats_sex.loc[stats_sex['AC_male'] == 0].sort_values(by=('AC_female'), ascending = False) stats_sex_no_male.head(10) """ Explanation: Or we could filter to find variants that were most common in females that were not found in any male samples. 
End of explanation """ # NOTE: For v2_1_1 the "variant_type" column must be replaced with "alternate_bases.allele_type AS variant_type" query_template = """ SELECT reference_name AS CHROM, start_position AS POS, names AS ID, reference_bases AS REF, alternate_bases.alt AS ALT, AN_afr, AN_amr, AN_eas, AN_nfe, alternate_bases.AC_afr AS AC_afr, alternate_bases.AC_amr AS AC_amr, alternate_bases.AC_eas AS AC_eas, alternate_bases.AC_nfe AS AC_nfe, FROM `bigquery-public-data.gnomAD.{GNOMAD_VER}__{CHROM}` AS main_table, main_table.alternate_bases AS alternate_bases WHERE start_position >= {X} AND start_position <= {Y} ORDER BY 1,2 """ query = query_template.format(GNOMAD_VER=gnomad_version_region, CHROM=chromosome_region, X=x, Y=y) stats_ancestry = run_query(query) stats_ancestry.head() """ Explanation: Instead of splitting AN and AC values by sex we can analyze ancestry. Query 1.3 - Allele Count by Ancestry A query to retrieve all variants in the region of interest along with AN and AC values for the following ancestries: * afr: African-American/African ancestry * amr: Latino ancestry * eas: East Asian ancestry * nfe: Non-Finnish European ancestry End of explanation """ stats_ancestry_amr=stats_ancestry.loc[ (stats_ancestry['AC_amr'] > 0) & (stats_ancestry['AC_afr'] == 0) & (stats_ancestry['AC_eas'] == 0) & (stats_ancestry['AC_nfe'] == 0)].sort_values(by=('AC_amr'), ascending = False) stats_ancestry_amr.head(10) """ Explanation: An example here would be to report the most common variant for each ancestry that was not present in any of the others. 
End of explanation """ query_template = """ SELECT column_name, field_path, description FROM `bigquery-public-data`.gnomAD.INFORMATION_SCHEMA.COLUMN_FIELD_PATHS WHERE table_name = "{GNOMAD_VER}__{CHROM}" AND column_name IN ( SELECT COLUMN_NAME FROM `bigquery-public-data`.gnomAD.INFORMATION_SCHEMA.COLUMNS WHERE table_name = "{GNOMAD_VER}__{CHROM}") """ query = query_template.format(GNOMAD_VER=gnomad_version_region, CHROM=chromosome_region) column_info = run_query(query) print('There are {} columns in `bigquery-public-data.gnomAD.{}__{}` table'.format(len(column_info.index), gnomad_version_region, chromosome_region)) column_info.head(7) """ Explanation: Query 1.4 - gnomAD Columns gnomAD tables have many more columns, you can find the full list of columns along with their description using the following query. End of explanation """ AN_columns = column_info[column_info['column_name'].str.startswith('AN')] # Retain only rows that column_name starts with "AN" AN_columns = AN_columns[['column_name', 'description']] # Drop extra column (field_path) AN_columns = AN_columns.sort_values(by=['column_name']) # Sort by column_name AN_columns.head(11) """ Explanation: Using column_info dataframe you can find other available values for the ancestry slice: End of explanation """ AC_columns = column_info[column_info['field_path'].str.startswith('alternate_bases.AC')] # Retain only rows that field_path starts with "alternate_bases.AC" AC_columns = AC_columns[['field_path', 'description']] # Drop extra column (column_name) AC_columns = AC_columns.sort_values(by=['field_path']) # Sort by field_path AC_columns.head(11) """ Explanation: Note that the corresponding values for AC and AF (Alternate allele frequency) exist under the alternate_bases column. End of explanation """ query_template = """ WITH summary_stats AS ( SELECT COUNT(1) AS num_variants, SUM(ARRAY_LENGTH(alternate_bases)) AS num_alts, # This data appears to be bi-allelic. 
SUM((SELECT alt.AC FROM UNNEST(alternate_bases) AS alt)) AS sum_AC, APPROX_QUANTILES((SELECT alt.AC FROM UNNEST(alternate_bases) AS alt), 10) AS quantiles_AC, SUM(AN) AS sum_AN, APPROX_QUANTILES(AN, 10) AS quantiles_AN, -- Also include some information from Variant Effect Predictor (VEP). STRING_AGG(DISTINCT (SELECT annot.symbol FROM UNNEST(alternate_bases) AS alt, UNNEST(vep) AS annot LIMIT 1), ', ') AS genes FROM `bigquery-public-data.gnomAD.{GNOMAD_VER}__{CHROM}` AS main_table WHERE start_position >= {X} AND start_position <= {Y}) --- --- The resulting quantiles and burden_of_mutation score give a very rough idea of the mutation --- rate within these particular regions of the genome. This query could be further refined to --- compute over smaller windows within the regions of interest and/or over different groupings --- of AC and AN by population. --- SELECT ROUND(({Y} - {X}) / num_variants, 3) AS burden_of_mutation, *, FROM summary_stats """ query = query_template.format(GNOMAD_VER=gnomad_version_region, CHROM=chromosome_region, X=x, Y=y) burden_of_mu = run_query(query) burden_of_mu.head() """ Explanation: Please refer to gnomAD release announcements (v2.1 and v3.0) for more details about demographics and annotation slices. The next query showcases how to use AN and AC values. Query 1.5 - Burden of Mutation Given a region of interest, compute the burden of mutation for the gene along with other summary statistics. End of explanation """ vep_columns = column_info[column_info['field_path'].str.startswith('alternate_bases.vep')] # Retain only rows that field_path starts with "alternate_bases.vep" vep_columns = vep_columns[['field_path', 'description']] # Drop extra column (column_name) vep_columns.head(22) """ Explanation: The other column to use is alternate_bases.vep which contains the VEP annotaions for each variant. 
End of explanation """ query_template = """ SELECT reference_name AS CHROM, start_position AS POS, names AS ID, reference_bases AS REF, alternate_bases.alt AS ALT, vep.Consequence AS Consequence, vep.IMPACT AS Impact, vep.SYMBOL AS Symbol, vep.Gene AS Gene, vep.EXON AS EXON, vep.INTRON AS INTRON, {EXTRA_COLS} FROM `bigquery-public-data.gnomAD.{GNOMAD_VER}__{CHROM}` AS main_table, main_table.alternate_bases AS alternate_bases, alternate_bases.vep AS vep WHERE start_position >= {X} AND start_position <= {Y} AND REGEXP_CONTAINS(vep.Consequence, r"missense_variant") ORDER BY start_position, reference_bases """ query = query_template.format(GNOMAD_VER=gnomad_version_region, CHROM=chromosome_region, EXTRA_COLS=extra_columns, X=x, Y=y) neg_variants = run_query(query) neg_variants.head() """ Explanation: The next query showcases how to use some of the vep annotation values. Query 1.6 - VEP Annotations Given a region of interest, examine vep annotations to pull out missense variants. End of explanation """ import ipywidgets as widgets print("Variables for Chromosome (Type 2) queries") gnomad_version_widget_chr = widgets.Dropdown( options=['v2_1_1_exomes', 'v2_1_1_genomes', 'v3_genomes'], value='v2_1_1_exomes', description='gnomAD version:', disabled=False, style={'description_width': 'initial'} ) display(gnomad_version_widget_chr) chromosome_widget_chr = widgets.Dropdown( options=['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15', 'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22', 'chrX', 'chrY'], value='chr17', description='Chromosome:', disabled=False, style={'description_width': 'initial'} ) display(chromosome_widget_chr) # Set the variables for the rest of the Chromosome (Type 2) queries based on the values above. 
# Freeze the widget selections into plain variables so the remaining
# Type 2 (whole-chromosome) queries can interpolate them into SQL templates.
gnomad_version_chr=gnomad_version_widget_chr.value
chromosome_chr=chromosome_widget_chr.value

print('Running chromosome (Type 2) queries on gnomAD version: {}, chromosome: {}'.format(
    gnomad_version_chr,
    chromosome_chr
))

if gnomad_version_chr.startswith('v3'):
    # Variant type (snv, indel, multi-snv, multi-indel, or mixed) is stored
    # under different columns in V2 and V3.
    variant_type_col = 'variant_type'
    extra_columns = ''
else:
    # BUGFIX: the column path must contain no whitespace. The previous value
    # ('alternate_bases. allele_type', with an embedded space) produced a
    # malformed identifier when substituted into the SQL templates below.
    variant_type_col = 'alternate_bases.allele_type'
    # These vep columns only exist in V2.
    extra_columns = 'vep.STRAND AS STRAND, vep.Protein_position AS Protein_pos,'

"""
Explanation: gnomAD Queries Type2: Explore an entire chromosome
This section queries across an entire chromosome.
End of explanation
"""

# Query 2.1: variants whose alternate-allele frequency exceeds 0.9.
# {EXTRA_COLS} expands to the V2-only vep columns (empty string on V3).
query_template = """
SELECT reference_name AS CHROM,
       start_position AS POS,
       names AS ID,
       reference_bases AS REF,
       alternate_bases.alt AS ALT,
       vep.SYMBOL AS Symbol,
       vep.Gene AS Gene,
       AN,
       alternate_bases.AC AS AC,
       alternate_bases.AF AS AF,
       vep.EXON AS EXON,
       vep.INTRON AS INTRON,
       {EXTRA_COLS}
FROM `bigquery-public-data.gnomAD.{GNOMAD_VER}__{CHROM}` AS main_table,
     main_table.alternate_bases AS alternate_bases,
     alternate_bases.vep AS vep
WHERE AN > 0 AND AF > 0.9
ORDER BY AN DESC
"""
query = query_template.format(GNOMAD_VER=gnomad_version_chr,
                              CHROM=chromosome_chr,
                              EXTRA_COLS=extra_columns)
high_af = run_query(query)
high_af.head()

"""
Explanation: Query 2.1 - Find alleles that occur at least in 90% of samples
Find all variants on the selected chromosome that were observed in at least 90% of samples.
In other words, this query finds variants where allele frequency is very high for non-REF alleles.
End of explanation
"""

# Condense the previous result: number of high-AF variants per gene symbol.
high_af.groupby('Symbol').count()[['POS']].sort_values(by=['POS'], ascending=False).head(10)

"""
Explanation: We can condense the result and only list gene symbols and the number of variants found in the previous query:
End of explanation
"""

# Query 2.2: rank variants by the (normalized) difference in male allele counts
# between Finnish (fin) and African-American/African (afr) ancestry samples.
query_template = """
SELECT reference_name AS CHROM,
       start_position AS POS,
       names AS ID,
       reference_bases AS REF,
       alternate_bases.alt AS ALT,
       vep.SYMBOL AS Symbol,
       vep.Gene AS Gene,
       AN,
       alternate_bases.AC_fin_male AS AC_fin_m,
       alternate_bases.AC_afr_male AS AC_afr_m,
       ROUND(ABS(alternate_bases.AC_fin_male - alternate_bases.AC_afr_male) / alternate_bases.AC_male, 3) AS fin_afr_diff,
       vep.EXON AS EXON,
       vep.INTRON AS INTRON,
       {EXTRA_COLS}
FROM `bigquery-public-data.gnomAD.{GNOMAD_VER}__{CHROM}` AS main_table,
     main_table.alternate_bases AS alternate_bases,
     alternate_bases.vep AS vep
WHERE vep.SYMBOL IS NOT NULL
  AND alternate_bases.AC_male > 20
  AND alternate_bases.AC_fin_male > 0
  AND alternate_bases.AC_afr_male > 0
order by fin_afr_diff DESC
LIMIT 1000
"""
query = query_template.format(GNOMAD_VER=gnomad_version_chr,
                              CHROM=chromosome_chr,
                              EXTRA_COLS=extra_columns)
stats_chr_ancestry = run_query(query)
stats_chr_ancestry.head()

"""
Explanation: Query 2.2 - Top variants by ancenstry difference
Find top 1,000 variants on the selected chromosome that show the most significant differences between male samples of African-American ancestry versus Finnish ancestry
End of explanation
"""

# Query 2.3: genes ranked by their number of distinct INDEL variants.
# {VAR_TYPE_COL} is the version-dependent variant-type column selected above.
query_template = """
SELECT Symbol, count(1) AS num_indels
FROM (
  SELECT DISTINCT start_position AS str_pos,
         alternate_bases.alt AS alt,
         vep.SYMBOL AS Symbol,
         {VAR_TYPE_COL} AS variant_type,
  FROM `bigquery-public-data.gnomAD.{GNOMAD_VER}__{CHROM}` AS main_table,
       main_table.alternate_bases AS alternate_bases,
       alternate_bases.vep AS vep
  WHERE vep.SYMBOL IS NOT NULL
    AND variant_type IN ('ins', 'del', 'indel')
)
GROUP BY 1
ORDER BY 2 DESC
LIMIT 1000
"""
query = query_template.format(GNOMAD_VER=gnomad_version_chr,
                              CHROM=chromosome_chr,
                              VAR_TYPE_COL=variant_type_col)
indel_stats = run_query(query)
indel_stats.head(10)

"""
Explanation: Query 2.3 - Find genes with high number of INDELs
Find top 1000 genes with the highest number of INDELs on the selected chromosome.
End of explanation
"""

# Query 2.4: histogram of distinct SNVs per fixed-width window of start_position.
bucket_size = 10000

query_template = """
SELECT CAST(FLOOR(DIV(start_position, {BUCKET})) AS INT64) AS start_pos_bucket,
       count(1) AS num_snv
FROM (
  SELECT DISTINCT start_position,
         alternate_bases.alt AS alt,
         {VAR_TYPE_COL} AS variant_type,
  FROM `bigquery-public-data.gnomAD.{GNOMAD_VER}__{CHROM}` AS main_table,
       main_table.alternate_bases AS alternate_bases
  WHERE variant_type = 'snv'
)
GROUP BY 1
ORDER BY 1
"""
query = query_template.format(GNOMAD_VER=gnomad_version_chr,
                              CHROM=chromosome_chr,
                              VAR_TYPE_COL=variant_type_col,
                              BUCKET=bucket_size)
snv_dist = run_query(query)
snv_dist.head()

import matplotlib.pyplot as plt

plt.figure(dpi=150)
plt.bar(snv_dist.start_pos_bucket, snv_dist.num_snv)
plt.xlabel("Bucket number of start_pos")
plt.ylabel("No of SNVs in each bucket")
plt.title("Distribution of SNVs on {} for buckets of {} base pairs".format(chromosome_chr, bucket_size))
plt.show()

"""
Explanation: Query 2.4 - Find distribution of SNVs across a chromosome
Find the distribution of SNVs across the selected chromosome. In order to be able to plot the result we group base pairs into buckets of size 10,000.
End of explanation
"""
rusucosmin/courses
ml/ex02/template/ex02.ipynb
mit
# Notebook export (ML course, ex02): cost function, grid search, and gradient
# descent on the height/weight regression data. Cells are separated by bare
# triple-quoted "Explanation" strings produced by the notebook-to-script export.
import datetime
from helpers import *

# load_data/standardize/build_model_data come from the course's helpers module;
# tx is the feature matrix (with offset column) and y the regression target.
height, weight, gender = load_data(sub_sample=False, add_outlier=False)
x, mean_x, std_x = standardize(height)
y, tx = build_model_data(x, weight)
y.shape, tx.shape
"""
Explanation: Load the data
End of explanation
"""
def loss_mse(e):
    """Mean squared error (with the conventional 1/2 factor) of residual vector e."""
    return 1 / 2 * np.mean(e ** 2)

def loss_mae(e):
    """Mean absolute error of residual vector e."""
    return np.mean(np.abs(e))

def compute_loss(y, tx, w):
    """Return the loss of linear model w on data (y, tx).

    Uses MSE; swap loss_mse for loss_mae to use absolute error instead.
    """
    # Residuals of the linear model: e = y - tx @ w.
    e = y - tx.dot(w)
    return loss_mse(e)
"""
Explanation: 1 Computing the Cost Function
Fill in the compute_loss function below:
<a id='compute_loss'></a>
End of explanation
"""
def grid_search(y, tx, w0, w1):
    """Exhaustive grid search over 2-D parameter vectors.

    Returns a len(w0) x len(w1) array whose entry (i, j) is the loss of
    the model with parameters (w0[i], w1[j]).
    """
    losses = np.zeros((len(w0), len(w1)))
    for i, p0 in enumerate(w0):
        for j, p1 in enumerate(w1):
            losses[i, j] = compute_loss(y, tx, np.array([p0, p1]))
    return losses
"""
Explanation: 2 Grid Search
Fill in the function grid_search() below:
End of explanation
"""
from grid_search import generate_w, get_best_parameters
from plots import grid_visualization

# Generate the grid of parameters to be swept
grid_w0, grid_w1 = generate_w(num_intervals=50)

# Start the grid search (wall-clock timed)
start_time = datetime.datetime.now()
grid_losses = grid_search(y, tx, grid_w0, grid_w1)

# Select the best combinaison (lowest loss over the grid)
loss_star, w0_star, w1_star = get_best_parameters(grid_w0, grid_w1, grid_losses)
end_time = datetime.datetime.now()
execution_time = (end_time - start_time).total_seconds()

# Print the results
print("Grid Search: loss*={l}, w0*={w0}, w1*={w1}, execution time={t:.3f} seconds".format(
    l=loss_star, w0=w0_star, w1=w1_star, t=execution_time))

# Plot the results
fig = grid_visualization(grid_losses, grid_w0, grid_w1, mean_x, std_x, height, weight)
fig.set_size_inches(10.0, 6.0)
fig.savefig("grid_plot")  # Optional saving
"""
Explanation: Let us play with the grid search demo now!
End of explanation
"""
def compute_gradient(y, tx, w):
    """Gradient of the MSE loss at w; also returns the residual vector.

    grad = -(1/N) * tx^T e, with e = y - tx @ w.
    """
    err = y - tx.dot(w)
    grad = -tx.T.dot(err) / len(err)
    return grad, err

# Quick smoke check of the gradient at an arbitrary parameter vector.
compute_gradient(y, tx, np.array([50, 10]))
"""
Explanation: 3 Gradient Descent
Again, please fill in the functions compute_gradient below:
End of explanation
"""
def gradient_descent(y, tx, initial_w, max_iters, gamma):
    """Full-batch gradient descent with step size gamma.

    Returns (losses, ws): the per-iteration losses and the visited parameter
    vectors (ws includes initial_w, so len(ws) == max_iters + 1).
    """
    # Define parameters to store w and loss
    ws = [initial_w]
    losses = []
    w = initial_w
    for n_iter in range(max_iters):
        # Loss is evaluated at the current w, before applying the update.
        grad, err = compute_gradient(y, tx, w)
        loss = loss_mse(err)
        w = w - gamma * grad
        # store w and loss
        ws.append(w)
        losses.append(loss)
        print("Gradient Descent({bi}/{ti}): loss={l}, w0={w0}, w1={w1}".format(
            bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]))
    return losses, ws
"""
Explanation: Please fill in the functions gradient_descent below:
End of explanation
"""
# from gradient_descent import *
from plots import gradient_descent_visualization

# Define the parameters of the algorithm.
max_iters = 50
gamma = 0.7

# Initialization
w_initial = np.array([0, 0])

# Start gradient descent.
# Run (timed) full-batch gradient descent with the parameters defined above.
start_time = datetime.datetime.now()
gradient_losses, gradient_ws = gradient_descent(y, tx, w_initial, max_iters, gamma)
end_time = datetime.datetime.now()

# Print result (local renamed: was misspelled "exection_time")
execution_time = (end_time - start_time).total_seconds()
print("Gradient Descent: execution time={t:.3f} seconds".format(t=execution_time))

# Time Visualization: slider replays the descent trajectory iteration by iteration.
from ipywidgets import IntSlider, interact

def plot_figure(n_iter):
    """Render the descent state after n_iter iterations (slider callback)."""
    fig = gradient_descent_visualization(
        gradient_losses, gradient_ws, grid_losses, grid_w0, grid_w1,
        mean_x, std_x, height, weight, n_iter)
    fig.set_size_inches(10.0, 6.0)

interact(plot_figure, n_iter=IntSlider(min=1, max=len(gradient_ws)))
"""
Explanation: Test your gradient descent function through gradient descent demo shown below:
End of explanation
"""
def compute_stoch_gradient(y, tx, w):
    """Compute a stochastic gradient from just few examples n and their corresponding y_n labels.

    The formula is identical to the full-batch gradient, only evaluated on a
    mini-batch, so it simply delegates to compute_gradient.
    """
    return compute_gradient(y, tx, w)

def stochastic_gradient_descent(y, tx, initial_w, batch_size, max_iters, gamma):
    """Stochastic gradient descent with mini-batches of size batch_size.

    Returns (losses, ws): the per-update mini-batch losses and the visited
    parameter vectors (ws includes initial_w).
    """
    ws = [initial_w]
    losses = []
    w = initial_w
    # (removed dead local `current_iter = 0`, which was never read)
    for n_iter in range(max_iters):
        # One mini-batch per outer iteration (num_batches=1).
        for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size, num_batches=1):
            gradient, err = compute_stoch_gradient(minibatch_y, minibatch_tx, w)
            w = w - gamma * gradient
            # Mini-batch loss evaluated at the pre-update w.
            loss = loss_mse(err)
            ws.append(w)
            losses.append(loss)
        print("SGD({bi}/{ti}): loss={l}, w0={w0}, w1={w1}".format(
            bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]))
    return losses, ws

# from stochastic_gradient_descent import *
# Define the parameters of the algorithm.
max_iters = 50
gamma = 0.7
batch_size = 1

# Initialization
w_initial = np.array([0, 0])

# Start SGD.
# Run (timed) stochastic gradient descent with the parameters defined above.
start_time = datetime.datetime.now()
sgd_losses, sgd_ws = stochastic_gradient_descent(
    y, tx, w_initial, batch_size, max_iters, gamma)
end_time = datetime.datetime.now()

# Print result
# NOTE(review): local name "exection_time" is misspelled ("execution_time").
exection_time = (end_time - start_time).total_seconds()
print("SGD: execution time={t:.3f} seconds".format(t=exection_time))

# Time Visualization: slider replays the SGD trajectory update by update.
from ipywidgets import IntSlider, interact
def plot_figure(n_iter):
    # Draw the SGD state after n_iter updates (callback for the slider).
    fig = gradient_descent_visualization(
        sgd_losses, sgd_ws, grid_losses, grid_w0, grid_w1, mean_x, std_x, height, weight, n_iter)
    fig.set_size_inches(10.0, 6.0)

# NOTE(review): the slider bound uses len(gradient_ws); presumably len(sgd_ws)
# was intended for the SGD trajectory -- confirm against the course solution.
interact(plot_figure, n_iter=IntSlider(min=1, max=len(gradient_ws)))
"""
Explanation: 4 Stochastic gradient descent
End of explanation
"""
# Reload the dataset WITH outliers to observe their effect on the MSE fit.
height, weight, gender = load_data(sub_sample=False, add_outlier=True)
x, mean_x, std_x = standardize(height)
y, tx = build_model_data(x, weight)

# Define the parameters of the algorithm.
max_iters = 50
gamma = 0.7

# Initialization
w_initial = np.array([0, 0])

# Start gradient descent.
start_time = datetime.datetime.now()
gradient_losses, gradient_ws = gradient_descent(y, tx, w_initial, max_iters, gamma)
end_time = datetime.datetime.now()

# Print result
# NOTE(review): same misspelled local name as above, kept for byte-identity.
exection_time = (end_time - start_time).total_seconds()
print("Gradient Descent: execution time={t:.3f} seconds".format(t=exection_time))

# Time Visualization
from ipywidgets import IntSlider, interact
def plot_figure(n_iter):
    # Draw the descent state after n_iter iterations (callback for the slider).
    fig = gradient_descent_visualization(
        gradient_losses, gradient_ws, grid_losses, grid_w0, grid_w1, mean_x, std_x, height, weight, n_iter)
    fig.set_size_inches(10.0, 6.0)

interact(plot_figure, n_iter=IntSlider(min=1, max=len(gradient_ws)))
"""
Explanation: 5 Effect of Outliers and MAE Cost Function, and Subgradient Descent
Exercise 5
Load and plot data containing outliers
End of explanation
"""
ES-DOC/esdoc-jupyterhub
notebooks/cccr-iitm/cmip6/models/sandbox-2/aerosol.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'cccr-iitm', 'sandbox-2', 'aerosol') """ Explanation: ES-DOC CMIP6 Model Properties - Aerosol MIP Era: CMIP6 Institute: CCCR-IITM Source ID: SANDBOX-2 Topic: Aerosol Sub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. Properties: 69 (37 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:53:48 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Software Properties 3. Key Properties --&gt; Timestep Framework 4. Key Properties --&gt; Meteorological Forcings 5. Key Properties --&gt; Resolution 6. Key Properties --&gt; Tuning Applied 7. Transport 8. Emissions 9. Concentrations 10. Optical Radiative Properties 11. Optical Radiative Properties --&gt; Absorption 12. Optical Radiative Properties --&gt; Mixtures 13. Optical Radiative Properties --&gt; Impact Of H2o 14. Optical Radiative Properties --&gt; Radiative Scheme 15. Optical Radiative Properties --&gt; Cloud Interactions 16. Model 1. 
Key Properties Key properties of the aerosol model 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of aerosol model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of aerosol model code End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.scheme_scope') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "troposhere" # "stratosphere" # "mesosphere" # "mesosphere" # "whole atmosphere" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Scheme Scope Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Atmospheric domains covered by the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.basic_approximations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Basic approximations made in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "3D mass/volume ratio for aerosols" # "3D number concenttration for aerosols" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. 
Prognostic Variables Form Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Prognostic variables in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 1.6. Number Of Tracers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of tracers in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.family_approach') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.7. Family Approach Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are aerosol calculations generalized into families of species? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Software Properties Software properties of aerosol code 2.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses atmospheric chemistry time stepping" # "Specific timestepping (operator splitting)" # "Specific timestepping (integrated)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Timestep Framework Physical properties of seawater in ocean 3.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Mathematical method deployed to solve the time evolution of the prognostic variables End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Split Operator Advection Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for aerosol advection (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.3. Split Operator Physical Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for aerosol physics (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.4. Integrated Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep for the aerosol model (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Implicit" # "Semi-implicit" # "Semi-analytic" # "Impact solver" # "Back Euler" # "Newton Raphson" # "Rosenbrock" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3.5. Integrated Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify the type of timestep scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Meteorological Forcings ** 4.1. Variables 3D Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Three dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Variables 2D Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Two dimensionsal forcing variables, e.g. land-sea mask definition End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. Frequency Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Frequency with which meteological forcings are applied (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Resolution Resolution in the aersosol model grid 5.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Canonical Horizontal Resolution Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 5.3. Number Of Horizontal Gridpoints Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 5.4. Number Of Vertical Levels Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.5. Is Adaptive Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Default is False. Set true if grid resolution changes during execution. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Tuning Applied Tuning methodology for aerosol model 6.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. 
Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics of mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Transport Aerosol transport 7.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of transport in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Specific transport scheme (eulerian)" # "Specific transport scheme (semi-lagrangian)" # "Specific transport scheme (eulerian and semi-lagrangian)" # "Specific transport scheme (lagrangian)" # TODO - please enter value(s) """ Explanation: 7.2. 
Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method for aerosol transport modeling End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Mass adjustment" # "Concentrations positivity" # "Gradients monotonicity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.3. Mass Conservation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method used to ensure mass conservation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.convention') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Convective fluxes connected to tracers" # "Vertical velocities connected to tracers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.4. Convention Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Transport by convention End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Emissions Atmospheric aerosol emissions 8.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of emissions in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Prescribed (climatology)" # "Prescribed CMIP6" # "Prescribed above surface" # "Interactive" # "Interactive above surface" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method used to define aerosol species (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Vegetation" # "Volcanos" # "Bare ground" # "Sea surface" # "Lightning" # "Fires" # "Aircraft" # "Anthropogenic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.3. Sources Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Sources of the aerosol species are taken into account in the emissions scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Interannual" # "Annual" # "Monthly" # "Daily" # TODO - please enter value(s) """ Explanation: 8.4. Prescribed Climatology Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify the climatology type for aerosol emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. 
Prescribed Climatology Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and prescribed via a climatology End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.6. Prescribed Spatially Uniform Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.7. Interactive Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.8. Other Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and specified via an &quot;other method&quot; End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.9. Other Method Characteristics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Characteristics of the &quot;other method&quot; used for aerosol emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.concentrations.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Concentrations Atmospheric aerosol concentrations 9.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of concentrations in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.2. Prescribed Lower Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the lower boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.3. Prescribed Upper Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the upper boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.4. Prescribed Fields Mmr Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed as mass mixing ratios. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.5. Prescribed Fields Mmr Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed as AOD plus CCNs. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10. Optical Radiative Properties Aerosol optical and radiative properties 10.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of optical and radiative properties End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11. Optical Radiative Properties --&gt; Absorption Absortion properties in aerosol scheme 11.1. Black Carbon Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.2. Dust Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of dust at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.3. Organics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of organics at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12. Optical Radiative Properties --&gt; Mixtures ** 12.1. External Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there external mixing with respect to chemical composition? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12.2. Internal Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there internal mixing with respect to chemical composition? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.3. Mixing Rule Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If there is internal mixing with respect to chemical composition then indicate the mixinrg rule End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13. Optical Radiative Properties --&gt; Impact Of H2o ** 13.1. Size Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does H2O impact size? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.2. Internal Mixture Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does H2O impact internal mixture? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14. Optical Radiative Properties --&gt; Radiative Scheme Radiative scheme for aerosol 14.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of radiative scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.2. Shortwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of shortwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.3. Longwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of longwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Optical Radiative Properties --&gt; Cloud Interactions Aerosol-cloud interactions 15.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of aerosol-cloud interactions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.2. Twomey Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the Twomey effect included? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.3. Twomey Minimum Ccn Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If the Twomey effect is included, then what is the minimum CCN number? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.4. Drizzle Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the scheme affect drizzle? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.5. Cloud Lifetime Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the scheme affect cloud lifetime? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.6. Longwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of longwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Model Aerosol model 16.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Dry deposition" # "Sedimentation" # "Wet deposition (impaction scavenging)" # "Wet deposition (nucleation scavenging)" # "Coagulation" # "Oxidation (gas phase)" # "Oxidation (in cloud)" # "Condensation" # "Ageing" # "Advection (horizontal)" # "Advection (vertical)" # "Heterogeneous chemistry" # "Nucleation" # TODO - please enter value(s) """ Explanation: 16.2. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Processes included in the Aerosol model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Radiation" # "Land surface" # "Heterogeneous chemistry" # "Clouds" # "Ocean" # "Cryosphere" # "Gas phase chemistry" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.3. Coupling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Other model components coupled to the Aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "DMS" # "SO2" # "Ammonia" # "Iodine" # "Terpene" # "Isoprene" # "VOC" # "NOx" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.4. Gas Phase Precursors Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of gas phase aerosol precursors. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Bulk" # "Modal" # "Bin" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.5. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Type(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.bulk_scheme_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Nitrate" # "Sea salt" # "Dust" # "Ice" # "Organic" # "Black carbon / soot" # "SOA (secondary organic aerosols)" # "POM (particulate organic matter)" # "Polar stratospheric ice" # "NAT (Nitric acid trihydrate)" # "NAD (Nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particule)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.6. Bulk Scheme Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of species covered by the bulk scheme. End of explanation """
cristhro/Machine-Learning
ejercicio 3/.ipynb_checkpoints/titanic-checkpoint.ipynb
gpl-3.0
%matplotlib inline import pandas as pd import numpy as np import random as rnd import seaborn as sns import matplotlib.pyplot as plt """ Explanation: Alumnos: Cristhian Rodriguez y Jesus Perucha Practica 3: Titanic End of explanation """ train_df = pd.read_csv('train.csv') test_df = pd.read_csv('test.csv') """ Explanation: Importamos los datos para entrenar y testear End of explanation """ print(train_df.columns.values) train_df.isnull().sum() """ Explanation: Miramos los datos, para ver que si hay nulos o datos que rellenar, como la edad y la cabina en este caso End of explanation """ print (train_df.info()) train_df.describe() """ Explanation: Faltan muchos datos de edad y cabina por rellenar, ademas de 2 embarcos Miramos los tipos de los datos End of explanation """ train_df, test_df = train_df.drop(['Cabin', 'Ticket'], axis=1), test_df.drop(['Cabin', 'Ticket'], axis=1) # Sacamos la descripcion de los valores que son Strings (object) train_df.describe(include=['O']) """ Explanation: Como faltan mas de la mitad de los datos de la cabina y no contienen informacion util, se puede descartar esta feature. 
Tambien vamos a quitar ticket, porque no hay relacion ninguna entre los nombres de los tickets End of explanation """ plt.title('Survival count between sex', size=20, y=1.1) sns.countplot(x = 'Survived', hue='Sex', data=train_df) #Hay una gran correlacion entre el sexo y la supervivencia # Pasamos el sexo de string a un int, 1 para hombre y 0 para mujer for df in [train_df, test_df]: df['Sex'] = df['Sex'].apply(lambda x : 1 if x == 'male' else 0) # Hay relacion directa entre la clase y la supervivencia plt.figure(figsize=(12, 12)) plt.subplot(2,2,1) plt.title('Survival rate / Pclass', size=15, y=1.1) sns.barplot(x='Pclass', y = 'Survived', data=train_df, palette='muted') sns.countplot(x = 'Survived', hue='Embarked', data=train_df) # Tambien hay una ligera correlacion con el lugar de embarque """ Explanation: Analisis a primera vista de los datos Hay que rellenar los datos que faltan para poder usarlo en los algoritmos de entrenamiento Hay que pasar todos los strings a valores numericos para poder usarlo en los algoritmos de entrenamiento Hay que descartar features que sean inutiles o crear nuevas features a partir de las nuevas para entrenar Visualizacion grafica de la relacion entre las features End of explanation """ train_df['Embarked'] = train_df['Embarked'].fillna('S') for dt in [train_df, test_df]: dt['Embarked'] = dt['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int) #Rellenamos el unico valor que falta de fare test_df['Fare'] = test_df['Fare'].fillna(test_df['Fare'].median()) # Transformamos los valores continuos de fare en valores discretos, agrupando los rangos en 4 grupos, del 0 al 3 for df in [train_df, test_df]: df['Fare'] = pd.qcut(df['Fare'], 4, labels=[0, 1, 2, 3]) train_df.head(5) """ Explanation: Como faltan 2 datos de embarque de 2 personas y usaremos la feature, rellenamos con S porque es donde la mayoria de las personas lo han hecho y hay menos riesgo de falsear las features. 
Tambien pasamos de S,C,Q a valores enteros para entrenarlos End of explanation """ for df in [train_df, test_df]: df['FamilySize'] = df['Parch'] + df['SibSp'] + 1 sns.barplot(x='FamilySize', y='Survived' , data=train_df) """ Explanation: Como Parch es la abreviacion de 'parent/children', sumado y SibSp es la abreviacion de 'sibling/spouse' sumados, se pueden juntar estas 2 features en una sola que representen el tamaño de la familia que tiene esa persona, incluyendola. Sacamos la grafica para ver la relacion que hay End of explanation """ def filter_family_size(x): if x == 1: return 0 elif x < 5: return 1 else: return 0 for df in [train_df, test_df]: df['FamilySize'] = df['FamilySize'].apply(filter_family_size) train_df = train_df.drop(['Parch', 'SibSp'], axis=1) test_df = test_df.drop(['Parch', 'SibSp'], axis=1) """ Explanation: De esta grafica podemos ver que las personas con 2,3 o 4 de tamaño familiar, tenian mas posibilidades de supervivencia Asi que vamos a simplicar esta nueva feature en 0 si esta fuera de 2,3 o 4 miembros en el barco y 1 si lo esta. 
Con esto las features Parch y SibSp no hacen falta End of explanation """ corrmat = train_df.corr() sns.heatmap(corrmat, square=True) print ("El numero de datos Age sin rellenar: ",train_df['Age'].isnull().sum()) plt.title('Distribucion de la edad original', size=20, y=1.1) sns.distplot(train_df['Age'].dropna()) #Rellenamos los campos edad vacios guess_ages = np.zeros((2,3)) for dataset in [train_df, test_df]: for i in range(0, 2): for j in range(0, 3): guess_df = dataset[(dataset['Sex'] == i) & \ (dataset['Pclass'] == j+1)]['Age'].dropna() # age_mean = guess_df.mean() # age_std = guess_df.std() # age_guess = rnd.uniform(age_mean - age_std, age_mean + age_std) age_guess = guess_df.median() # Convert random age float to nearest .5 age guess_ages[i,j] = int( age_guess/0.5 + 0.5 ) * 0.5 for i in range(0, 2): for j in range(0, 3): dataset.loc[ (dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j+1),\ 'Age'] = guess_ages[i,j] dataset['Age'] = dataset['Age'].astype(int) print ("El numero de datos Age sin rellenar: ",train_df['Age'].isnull().sum()) """ Explanation: Rellenar la edad La forma mas precisa de hacerlo es usando la mediana y la correlaciones que la edad tiene con otras features,en este caso las mas correladas son el genero y pclass, como se ve en el diagrama de calor de abajo. 
A partir de la edad he creado una nueva feature con el rango de edades, para ver la supervivencia entre rangos End of explanation """ plt.title('Distribucion de la edad rellena', size=20, y=1.1) sns.distplot(train_df['Age']) #Creamos la nueva feature y la mostramos train_df['AgeBand'] = pd.cut(train_df['Age'], 8) train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending=True) sns.countplot(x='Survived', hue='AgeBand' , data=train_df) """ Explanation: Al haber introducido los nuevos datos sobre la media, la distribucion sigue siendo igual a antes de introducirlos, pero con un repunte de datos en la zona de la mediana End of explanation """ for dataset in [train_df, test_df]: dataset.loc[ dataset['Age'] <= 10, 'Age'] = 0 dataset.loc[(dataset['Age'] > 10) & (dataset['Age'] <= 20), 'Age'] = 1 dataset.loc[(dataset['Age'] > 20) & (dataset['Age'] <= 30), 'Age'] = 2 dataset.loc[(dataset['Age'] > 30) & (dataset['Age'] <= 40), 'Age'] = 3 dataset.loc[(dataset['Age'] > 40) & (dataset['Age'] <= 50), 'Age'] = 4 dataset.loc[(dataset['Age'] > 50) & (dataset['Age'] <= 60), 'Age'] = 5 dataset.loc[(dataset['Age'] > 60) & (dataset['Age'] <= 70), 'Age'] = 6 dataset.loc[ dataset['Age'] > 70, 'Age'] = 7 train_df.head() train_df = train_df.drop(['AgeBand'], axis=1) """ Explanation: Convertimos el campo edad en valores de 0 al 7 siguiendo la feature banda de edades que hemos creado antes, con este cambio, banda de edades es una feature que no necesitamos ya End of explanation """ # Filter the name def get_title(x): y = x[x.find(',')+1:].replace('.', '').replace(',', '').strip().split(' ') if y[0] == 'the': # Search for the countess title = y[1] else: title = y[0] return title def filter_title(title, sex): if title in ['Countess', 'Dona', 'Lady', 'Jonkheer', 'Mme', 'Mlle', 'Ms', 'Capt', 'Col', 'Don', 'Sir', 'Major', 'Rev', 'Dr']: if sex: return 'Rare_male' else: return 'Rare_female' else: return title for df in [train_df, test_df]: 
df['NameLength'] = df['Name'].apply(lambda x : len(x)) df['Title'] = df['Name'].apply(get_title) title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5} for dataset in [train_df, test_df]: dataset['Title'] = dataset['Title'].map(title_mapping) dataset['Title'] = dataset['Title'].fillna(0) #Quitamos los titulos especiales y los agrupamos en categorias mas concretas for df in [train_df, test_df]: df['Title'] = df.apply(lambda x: filter_title(x['Title'], x['Sex']), axis=1) sns.countplot(y=train_df['Title']) train_df.groupby('Title')['PassengerId'].count().sort_values(ascending=False) # Borramos la columna Name train_df = train_df.drop(['Name', 'PassengerId'], axis=1) test_df = test_df.drop(['Name'], axis=1) train_df.head() """ Explanation: Clasificamos el nombre segun el titulo de una persona End of explanation """ X_train = train_df.drop(["Survived"], axis=1).copy() Y_train = train_df["Survived"] X_test = test_df.drop("PassengerId", axis=1).copy() X_train.shape, Y_train.shape, X_test.shape X_test.head() X_train.head() """ Explanation: Eleccion del Modelo End of explanation """ from sklearn.ensemble import RandomForestClassifier random_forest = RandomForestClassifier(n_estimators=101) random_forest.fit(X_train, Y_train) Y_pred = random_forest.predict(X_test) random_forest.score(X_train, Y_train) #acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2) #acc_random_forest """ Explanation: Random Forest End of explanation """ from sklearn import tree clf = tree.DecisionTreeClassifier() clf.fit(X_train, Y_train) Y_pred = clf.predict(X_test) clf.score(X_train, Y_train) """ Explanation: Decision Tree End of explanation """ from sklearn.svm import SVC svc = SVC(C=10000.0) svc.fit(X_train, Y_train) Y_pred = svc.predict(X_test) svc.score(X_train, Y_train) """ Explanation: Support Vector Machines End of explanation """ from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors = 3) knn.fit(X_train, Y_train) Y_pred 
= knn.predict(X_test) knn.score(X_train, Y_train) """ Explanation: KNN End of explanation """ submission = pd.DataFrame({ "PassengerId": test_df["PassengerId"], "Survived": Y_pred }) """ Explanation: Creamos el archivo submission para subir a kaggle End of explanation """ submission.to_csv('submission.csv', index=False) """ Explanation: Lo guardamos en formato csv End of explanation """
mtambos/springleaf
Springleaf - preprocess - date columns.ipynb
mit
%pylab inline %load_ext autoreload %autoreload 2 from __future__ import division from collections import defaultdict, namedtuple import cPickle as pickle from datetime import datetime, timedelta import dateutil from functools import partial import inspect import json import os import re import sys import numpy as np import pandas as pd """ Explanation: Import useful stuff and define ancillary functions End of explanation """ if os.name == 'nt': TRAIN_PATH = r'D:\train.csv' PTRAIN_PATH = r'D:\train_preprocessed_float_string_date.csv' TEST_PATH = r'D:\test.csv' GOOGNEWS_PATH = r'D:\GoogleNews-vectors-negative300.bin.gz' VOCAB_PATH = r'D:\big.txt' else: TRAIN_PATH = r'/media/mtambos/speedy/train.csv' PTRAIN_PATH = r'/media/mtambos/speedy/train_preprocessed_float_string_date.csv' TEST_PATH = r'/media/mtambos/speedy/test.csv' GOOGNEWS_PATH = r'/media/mtambos/speedy/GoogleNews-vectors-negative300.bin.gz' VOCAB_PATH = r'/media/mtambos/speedy/big.txt' #df_orig = pd.read_csv(TRAIN_PATH, index_col="ID") df = pd.read_csv(PTRAIN_PATH, index_col="ID") #df """ Explanation: Load train data Using pandas' read_csv with all the defaults End of explanation """ date_cols = ['VAR_0073', 'VAR_0075', 'VAR_0156', 'VAR_0157', 'VAR_0158', 'VAR_0159', 'VAR_0166', 'VAR_0167', 'VAR_0168', 'VAR_0169', 'VAR_0176', 'VAR_0177', 'VAR_0178', 'VAR_0179', 'VAR_0204', 'VAR_0217', 'VAR_0294', 'VAR_0314'] """ Explanation: Define columns End of explanation """ def parse_date_str(date_val): if isinstance(date_val, datetime): return date_val date_val = str(date_val).lower() date_val = None if date_val == 'nan' else date_val if date_val is None: return pd.NaT date_val = date_val[:-2] if date_val[-2:] == '.0' else date_val try: return datetime.strptime(date_val, '%d%b%y:%H:%M:%S') except: try: return datetime.strptime(date_val, '%d%b%y') except: try: return datetime.strptime(date_val, '%Y') except: print date_val return pd.NaT df_date_cols = df[date_cols].applymap(parse_date_str).astype('datetime64[ns]') 
df_date_cols.describe() df[date_cols] = df_date_cols """ Explanation: Parse the weird date format of the date column End of explanation """ df.drop_duplicates(inplace=True) """ Explanation: Drop duplicate rows End of explanation """ cols_to_drop = set() for i, col in enumerate(date_cols): for col2 in date_cols[i+1:]: if (df[col] == df[col2]).all(): cols_to_drop.add(col2) cols_to_drop """ Explanation: Drop duplicate columns End of explanation """ nan_cols = df.isnull().all() nan_cols = nan_cols.index[nan_cols].tolist() nan_cols """ Explanation: Remove columns with only NaNs End of explanation """ df[date_cols] = df[date_cols].astype('datetime64[ns]') years = pd.DataFrame(columns=[c+'_year' for c in date_cols], index=df.index, dtype=np.int) months = pd.DataFrame(columns=[c+'_month' for c in date_cols], index=df.index, dtype=np.int) days = pd.DataFrame(columns=[c+'_day' for c in date_cols], index=df.index, dtype=np.int) for c in date_cols: dateIndex = pd.DatetimeIndex(df[c]) years[c+'_year'] = dateIndex.year months[c+'_month'] = dateIndex.month days[c+'_day'] = dateIndex.day """ Explanation: Vectorize Datetime colums Create dataframes to separately store the year, month and day information of the date columns End of explanation """ df = df.drop(date_cols, axis=1) df = df.join(years) df = df.join(months) df = df.join(days) date_cols = years.columns.tolist() + months.columns.tolist() + days.columns.tolist() """ Explanation: Delete the original date columns and join the years, months and days DataFrames with the original DataFrame End of explanation """ df_desc = df.describe() df_desc[sorted(df_desc.columns, key=lambda x: df_desc.loc['std', x])] """ Explanation: Eliminate columns with 0 variance See which columns have low standard deviation End of explanation """ std_series = df_desc.loc['std', :] null_std_cols = std_series[std_series == 0] df = df.drop(null_std_cols.index, axis=1) """ Explanation: Eliminate all columns with standard deviation equal to 0 End of 
explanation """ df.to_csv(PTRAIN_PATH) with open('date_cols.pickle', 'wb') as fp: pickle.dump(date_cols, fp) """ Explanation: Save preprocessed data to another csv file End of explanation """
MCardus/foodnet
graph_analytics/graph_analytics.ipynb
mit
#imports import networkx as nx import pandas as pd from itertools import combinations import matplotlib.pyplot as plt from matplotlib import pylab import sys from itertools import combinations import operator from operator import itemgetter from scipy import integrate # Exploring data recipes_df = pd.read_csv('../data/clean_spanish_recipes.csv',sep='","') print recipes_df.keys() print "\n" print recipes_df.head() # Transforming data #recipes_df["ingredients"].apply(encode("latin-1")) recipes_df["ingredients"] = recipes_df["ingredients"].str.split("', '") print type(recipes_df["ingredients"][0]) """ Explanation: Foodnet - Spanish cuisine analysis Author: Marc Cadús García In this notebook I pretend to apply different analytics techniques over a graph representing the Spanish cuisine in order to extract new insights. It is expected that graph algorithms may help to extract new knowledge for helping to understand better the Spanish culinary culture. To do so, I a going to use Python networkX. I have scrapped near 3000 Spanish recipes from cookpad.com. These recipes and the scrapping code are available in this repository. Data exploration and transformation End of explanation """ def build_graph(nodes, graph): # Generate a new graph. 
Edges are nodes permutations in pairs edges = combinations(nodes, 2) graph.add_nodes_from(nodes) weighted_edges = list() for edge in edges: if graph.has_edge(edge[0],edge[1]): weighted_edges.append((edge[0],edge[1],graph[edge[0]][edge[1]]['weight']+1)) else: weighted_edges.append((edge[0],edge[1],1)) graph.add_weighted_edges_from(weighted_edges) def save_graph(graph,file_name): #initialze Figure plt.figure(num=None, figsize=(120, 120), dpi=60) plt.axis('off') fig = plt.figure(1) pos = nx.spring_layout(graph) d = nx.degree(graph) nx.draw_networkx_nodes(graph,pos, nodelist=d.keys(), node_size=[v * 10 for v in d.values()]) nx.draw_networkx_edges(graph,pos) nx.draw_networkx_labels(graph,pos) cut = 1.00 xmax = cut * max(xx for xx, yy in pos.values()) ymax = cut * max(yy for xx, yy in pos.values()) plt.xlim(0, xmax) plt.ylim(0, ymax) plt.savefig(file_name,bbox_inches="tight") pylab.close() del fig # Generating graph recipes_graph = nx.Graph() recipes_graph.clear() for val in recipes_df["ingredients"]: build_graph(val,recipes_graph) """ Explanation: Graph building End of explanation """ #Num of nodes print "Total num of nodes: "+str(len(recipes_graph.nodes())) print "Total num of edges: "+str(len(recipes_graph.edges())) # Top 20 higher degree nodes degrees = sorted(recipes_graph.degree_iter(),key=itemgetter(1),reverse=True) high_degree_nodes = list() for node in degrees[:20]: high_degree_nodes.append(node[0]) print node # Top 20 eigenvector centrality eigenvector_centrality = nx.eigenvector_centrality(recipes_graph) eigenvector_centrality_sorted = sorted(eigenvector_centrality.items(), key=itemgetter(1), reverse=True) for node in eigenvector_centrality_sorted[1:21]: print node # Top 20 pagerank centrality pagerank_centrality = nx.eigenvector_centrality(recipes_graph) pagerank_centrality_sorted = sorted(pagerank_centrality.items(), key=itemgetter(1), reverse=True) for node in pagerank_centrality_sorted[1:21]: print node # Conected components connected_component = 
list(nx.connected_component_subgraphs(recipes_graph)) print "There is "+str(len(connected_component))+" connected componentes" for component in connected_component: print "- Component of "+str(len(component))+ " nodes" if (len(component)==1): print "\t- Ingredient: "+str(component.nodes()) main_component = connected_component[0] # Graph diameter print "Nodes having minimum eccentricity\n"+str(nx.center(main_component)) print "Nodes having maximum eccentricity\n"+str(nx.periphery(main_component)) print "Minimum eccentricity "+str(nx.radius(main_component)) print "Maximum eccentricity "+str(nx.diameter(main_component)) # Mean cut print "Nodes to be removed to disconect the graph"+nx.minimum_node_cut(main_component) """ Explanation: Graph analytics End of explanation """ # For avoid encoding problems reload(sys) sys.setdefaultencoding('utf8') # Original graph save_graph(main_component,"original_graph.jpg") def extract_backbone(g, alpha): backbone_graph = nx.Graph() for node in g: k_n = len(g[node]) if k_n > 1: sum_w = sum( g[node][neighbor]['weight'] for neighbor in g[node] ) for neighbor in g[node]: edgeWeight = g[node][neighbor]['weight'] pij = float(edgeWeight)/sum_w if (1-pij)**(k_n-1) < alpha: # equation 2 backbone_graph.add_edge( node,neighbor, weight = edgeWeight) return backbone_graph save_graph(extract_backbone(main_component,0.01),"backbone_graph.jpg") # Visualizing Higher degree nodes k = recipes_graph.subgraph(high_degree_nodes) save_graph(k,"high_degree_subgraph.jpg") """ Explanation: Visualitzations End of explanation """
svdwulp/da-programming-1
week_08_oefeningen_uitwerkingen.ipynb
gpl-2.0
import random  # stdlib RNG: much faster than np.random.choice for single draws

n = 10000          # number of simulated walks (trials)
steps_to_exit = [] # steps needed to reach either end, one entry per walk

for i in range(n):
    x = 0      # the ant starts in the middle cell (the origin)
    steps = 0
    # Walk until the ant reaches the sugar in cell -7 or +7.
    while -7 < x < 7:
        x += random.choice((-1, 1))  # fair coin: step left or right
        steps += 1
    steps_to_exit.append(steps)

# Report the mean number of steps. np.mean is used instead of the bare
# pylab `mean`, which is only star-imported by a %pylab magic in a later
# cell and therefore undefined when this cell runs on a fresh kernel.
print("Gemiddeld aantal stappen tot suiker: {:.3f}".format(np.mean(steps_to_exit)))
"""
Explanation: Oefening 1
In de bovenstaande afbeelding zie je een mier in zijn natuurlijke leefomgeving: een rij hokjes. De mier zal gaan wandelen, op zoek naar voedsel of andere eerste levensbehoeften. We modelleren het wandelgedrag van de mier als volgt: iedere tijdseenheid (stap) loopt de mier met een kans van 50% 1 vakje naar links en zo niet, dan loopt hij 1 vakje naar rechts. Het doel voor de mier is om de suiker te bereiken in een van de uiterste vakjes.
Beantwoord de volgende vraag met een simulatie (experiment 10000 keer uitvoeren): hoeveel tijdseenheden (stapjes) duurt het gemiddeld tot de mier in het meest linkse vakje of het meest rechtse vakje aankomt?
End of explanation
"""
Hoe groot is de kans dat de man in hooguit 10000 stappen terug komt bij het caf&eacute;? End of explanation """ n = 10000 successes = 0 for i in range(n): sides = list(np.random.rand(3)) sides.sort() if sides[2] < sum(sides[:2]): successes += 1 print("Kans op mogelijkheid driehoek bij benadering {:.3f}".format(successes / n)) """ Explanation: Oefening 3 Wanneer we de lengte van de zijden $a$, $b$ en $c$ van een (mogelijke) driehoek willekeurig kiezen uit de uniforme verdeling $U(0, 1)$, wat is de kans dat er een daadwerkelijk een driehoek van te maken is? End of explanation """ %pylab %matplotlib inline x = linspace(0, 1, 400) y = sqrt(1 - x**4) plt.fill_between(x, y) plt.axis('equal') plt.ylim([0, 1.1]) plt.show() """ Explanation: Oefening 4 Bij de specialisatie Engineering wordt hard gewerkt aan het broodnodige integreren. Maar sommige functies zijn 'wat lastig' te integreren. De integraal $\int_{0}^{1}\sqrt{1 - x^4} dx$ is een voorbeeld van zo'n functie: End of explanation """ n = 500000 under_graph = 0 for i in range(n): x = np.random.rand() y = np.random.rand() fx = sqrt(1 - x**4) if y < fx: under_graph += 1 print("Schatting kans willekeurig punt onder grafiek = {:.3f}".format(under_graph / n)) print("Schatting integraal = {:.5f}".format((1 * 1) * (under_graph / n))) """ Explanation: Met behulp van Monte Carlo simulatie kun je de integraal wel vrij eenvoudig benaderen. Je kunt een rechthoek kiezen waar je integraal binnen valt en dan voor willekeurige punten in de rechthoek bepalen of ze onder of boven de grafiek van de functie vallen. Benader de gegeven integraal met een Monte Carlo simulatie. End of explanation """
mne-tools/mne-tools.github.io
dev/_downloads/f31e73ee907864d95a2b617fdc76b71e/source_label_time_frequency.ipynb
bsd-3-clause
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # # License: BSD-3-Clause import numpy as np import matplotlib.pyplot as plt import mne from mne import io from mne.datasets import sample from mne.minimum_norm import read_inverse_operator, source_induced_power print(__doc__) """ Explanation: Compute power and phase lock in label of the source space Compute time-frequency maps of power and phase lock in the source space. The inverse method is linear based on dSPM inverse operator. The example also shows the difference in the time-frequency maps when they are computed with and without subtracting the evoked response from each epoch. The former results in induced activity only while the latter also includes evoked (stimulus-locked) activity. End of explanation """ data_path = sample.data_path() meg_path = data_path / 'MEG' / 'sample' raw_fname = meg_path / 'sample_audvis_raw.fif' fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif' label_name = 'Aud-rh' fname_label = meg_path / 'labels' / f'{label_name}.label' tmin, tmax, event_id = -0.2, 0.5, 2 # Setup for reading the raw data raw = io.read_raw_fif(raw_fname) events = mne.find_events(raw, stim_channel='STI 014') inverse_operator = read_inverse_operator(fname_inv) include = [] raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more # Picks MEG channels picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True, stim=False, include=include, exclude='bads') reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6) # Load epochs epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=reject, preload=True) # Compute a source estimate per frequency band including and excluding the # evoked response freqs = np.arange(7, 30, 2) # define frequencies of interest label = mne.read_label(fname_label) n_cycles = freqs / 3. 
# different number of cycle per frequency # subtract the evoked response in order to exclude evoked activity epochs_induced = epochs.copy().subtract_evoked() plt.close('all') for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced], ['evoked + induced', 'induced only'])): # compute the source space power and the inter-trial coherence power, itc = source_induced_power( this_epochs, inverse_operator, freqs, label, baseline=(-0.1, 0), baseline_mode='percent', n_cycles=n_cycles, n_jobs=None) power = np.mean(power, axis=0) # average over sources itc = np.mean(itc, axis=0) # average over sources times = epochs.times ########################################################################## # View time-frequency plots plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43) plt.subplot(2, 2, 2 * ii + 1) plt.imshow(20 * power, extent=[times[0], times[-1], freqs[0], freqs[-1]], aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r') plt.xlabel('Time (s)') plt.ylabel('Frequency (Hz)') plt.title('Power (%s)' % title) plt.colorbar() plt.subplot(2, 2, 2 * ii + 2) plt.imshow(itc, extent=[times[0], times[-1], freqs[0], freqs[-1]], aspect='auto', origin='lower', vmin=0, vmax=0.7, cmap='RdBu_r') plt.xlabel('Time (s)') plt.ylabel('Frequency (Hz)') plt.title('ITC (%s)' % title) plt.colorbar() plt.show() """ Explanation: Set parameters End of explanation """
enakai00/jupyter_ml4se_commentary
04-Graph.ipynb
apache-2.0
import numpy as np import matplotlib.pyplot as plt import pandas as pd from pandas import Series, DataFrame from numpy.random import randint """ Explanation: グラフの描画 End of explanation """ dices = randint(1,7,(100, 2)) dices[:5] """ Explanation: 2個のサイコロを100回振った結果を保存 End of explanation """ total = np.sum(dices, axis=1) total[:5] """ Explanation: 2個の目の合計を計算 End of explanation """ doublets = [0,0,0,0,0,0] for (x, y) in dices: if x == y: doublets[x-1] += 1 doublets """ Explanation: それぞれの目について、ゾロ目の回数を計算 End of explanation """ counts = np.zeros((6,6)) for (x, y) in dices: counts[y-1, x-1] += 1 print counts """ Explanation: 目の組み合わせごとの回数を計算 End of explanation """ fig = plt.figure(figsize=(14,4)) subplot = fig.add_subplot(1,3,1) subplot.set_title('Sum of 2dices') subplot.set_xlabel('Total') subplot.set_ylabel('Count') subplot.set_xlim(1,13) subplot.hist(total, bins=11, range=(1.5, 12.5), label='Sum') subplot = fig.add_subplot(1,3,2) subplot.set_title('Doublets counts') subplot.set_xlabel('Number') subplot.set_ylabel('Count') subplot.set_xlim(0.5, 6.5) subplot.bar(range(1,7), doublets, align='center') subplot = fig.add_subplot(1,3,3) subplot.set_title('Pair counts') subplot.set_xlabel('Dice1') subplot.set_ylabel('Dice2') subplot.imshow(counts, origin='lower', extent=(0.5,6.5,0.5,6.5), interpolation='nearest') from numpy.random import normal def generate_data01(n): data_x = [] data_y = [] for i in range(n): x = float(i) / float(n-1) # [0, 1]をn等分したi番目の値 y = np.sin(2*np.pi*x) + normal(0, 0.3) data_x.append(x) data_y.append(y) return data_x, data_y def generate_data02(n): data_x = np.linspace(0,1,n) data_y = np.sin(2*np.pi*data_x) + normal(0, 0.3, n) return data_x, data_y fig = plt.figure() data_x, data_y = generate_data01(10) #data_x, data_y = generate_data02(10) subplot = fig.add_subplot(1,1,1) subplot.set_xlabel('Observation point') subplot.set_ylabel('Value') subplot.set_xlim(-0.05,1.05) # 生成したデータを表示 subplot.scatter(data_x, data_y, marker='o', color='blue', label='Observed 
value') # 三角関数の曲線を表示 linex = np.linspace(0,1,100) liney = np.sin(2*np.pi*linex) subplot.plot(linex, liney, linestyle='--', color='green', label='Theoretical curve') # 凡例を表示 subplot.legend(loc=1) """ Explanation: 計算結果をグラフに表示 End of explanation """
waltervh/BornAgain-tutorial
talks/day_1/python_introduction/BornAgainSchool_Basic.ipynb
gpl-3.0
# Show which Python interpreter this notebook is running on.
import sys
print(sys.version)
"""
Explanation: 1. Basic Python Types
1.1 Verifying the python version you are using
End of explanation
"""
Notes: 1.8 Python Objects End of explanation """ a = [ 0.0, 1.0, 6.2, 5.333, 9, 4, 3.4] print("a is equal to ", a) print("a is of type: ", type(a)) print("a[0] is equal to ", a[0], "and is of type: ", type(a[0])) print("a[4] is equal to ", a[4], "and is of type: ", type(a[4])) """ Explanation: Notes: 1.9 Create a list End of explanation """ b = a[2:4] print(b) b = a[:4] print(b) b = a[2:] print(b) b = a[::-1] print(b) b = a[2:6:2] print(b) """ Explanation: Notes: 1.10 Slice a list End of explanation """ iterator = iter(a) print(type(iterator)) print(next(iterator)) print(next(iterator)) print(next(iterator)) print(next(iterator)) """ Explanation: Notes: 1.11 Exercise: Create a list with ten values from 0 to 9 and slice it to have only the values from 3 to 7 inside Notes: 2. Python: Utilizing Sequences 2.1 Iterators End of explanation """ for element in a: print(element, type(element)) """ Explanation: Notes: 2.2 For loop Design a for loop printing the elements of a End of explanation """ b = 4 for element in a: if element < b: print(element, "is smaller than ", b) elif element == b: print(element, "is equal to ", b) else: print(element, "is bigger than ",b) """ Explanation: Notes: 2.3 if, elif, else statements Design a for loop over the previsouly defined list a and print 'smaller', 'equal'or 'bigger'depending on if the element in a meets these condition compared to 4. End of explanation """
scikit-optimize/scikit-optimize.github.io
dev/notebooks/auto_examples/interruptible-optimization.ipynb
bsd-3-clause
print(__doc__)
import sys
import numpy as np
# Fix the RNG seed so the example run is reproducible.
np.random.seed(777)
import os
"""
Explanation: Interruptible optimization runs with checkpoints
Christian Schell, Mai 2018
Reformatted by Holger Nahrstaedt 2020
.. currentmodule:: skopt
Problem statement
Optimization runs can take a very long time and even run for multiple days. If for some reason the process has to be interrupted results are irreversibly lost, and the routine has to start over from the beginning.
With the help of the :class:callbacks.CheckpointSaver callback the optimizer's current state can be saved after each iteration, allowing to restart from that point at any time.
This is useful, for example,
if you don't know how long the process will take and cannot hog computational resources forever
if there might be system failures due to shaky infrastructure (or colleagues...)
if you want to adjust some parameters and continue with the already obtained results
End of explanation
"""
Additionally we will instantiate the :class:callbacks.CheckpointSaver and pass it to the minimizer: End of explanation """ from skopt import load res = load('./checkpoint.pkl') res.fun """ Explanation: Now let's assume this did not finish at once but took some long time: you started this on Friday night, went out for the weekend and now, Monday morning, you're eager to see the results. However, instead of the notebook server you only see a blank page and your colleague Garry tells you that he had had an update scheduled for Sunday noon – who doesn't like updates? :class:gp_minimize did not finish, and there is no res variable with the actual results! Restoring the last checkpoint Luckily we employed the :class:callbacks.CheckpointSaver and can now restore the latest result with :class:skopt.load (see sphx_glr_auto_examples_store-and-load-results.py for more information on that) End of explanation """ x0 = res.x_iters y0 = res.func_vals gp_minimize(obj_fun, # the function to minimize [(-20.0, 20.0)], # the bounds on each dimension of x x0=x0, # already examined values for x y0=y0, # observed values for x0 acq_func="LCB", # the acquisition function (optional) n_calls=10, # number of evaluations of f including at x0 n_random_starts=3, # the number of random initialization points callback=[checkpoint_saver], random_state=777) """ Explanation: Continue the search The previous results can then be used to continue the optimization process: End of explanation """
YuriyGuts/kaggle-quora-question-pairs
notebooks/feature-fuzzy.ipynb
mit
from pygoose import *
"""
Explanation: Feature: Fuzzy String Matching
Calculate edit distances between each question pair (Levenshtein, Jaro, Jaro-Winkler, ...).
Imports
This utility package imports numpy, pandas, matplotlib and a helper kg module into the root namespace.
End of explanation
"""
from fuzzywuzzy import fuzz
from jellyfish import jaro_distance, jaro_winkler
"""
Explanation: Fuzzy matching libraries
End of explanation
"""
project = kg.Project.discover()
"""
Explanation: Config
Automatically discover the paths to various data folders and compose the project structure.
End of explanation
"""
feature_list_id = 'fuzzy'
"""
Explanation: Identifier for storing these features on disk and referring to them later.
End of explanation
"""
# Tokenized, spell-checked questions produced by the preprocessing notebooks.
tokens_train = kg.io.load(project.preprocessed_data_dir + 'tokens_spellcheck_train.pickle')
tokens_test = kg.io.load(project.preprocessed_data_dir + 'tokens_spellcheck_test.pickle')
tokens = tokens_train + tokens_test
"""
Explanation: Read data
Preprocessed and tokenized questions.
End of explanation
"""
def get_fuzzy_distances(pair):
    """Compute 7 fuzzy-similarity features for one question pair.

    pair: (q1_tokens, q2_tokens) — two lists of token strings.
    Returns a float array: five fuzzywuzzy ratios scaled to [0, 1],
    followed by the Jaro and Jaro-Winkler similarities.
    """
    q1_tokens, q2_tokens = pair
    # Joined text versions, used for the character-based jellyfish metrics.
    q1_text = ' '.join(pair[0])
    q2_text = ' '.join(pair[1])
    # NOTE(review): the token *lists* (not the joined strings) are passed to
    # the fuzz.* functions below, while q1_text/q2_text are only used for
    # jaro/jaro_winkler. fuzzywuzzy is documented for string inputs —
    # confirm this is intentional before changing it, since altering the
    # inputs would change the shipped feature values.
    fuzzy_distances = np.array([
        fuzz.ratio(q1_tokens, q2_tokens),
        fuzz.partial_ratio(q1_tokens, q2_tokens),
        fuzz.token_sort_ratio(q1_tokens, q2_tokens),
        fuzz.token_set_ratio(q1_tokens, q2_tokens),
        fuzz.partial_token_sort_ratio(q1_tokens, q2_tokens),
    ], dtype='float')
    # Normalize to [0 - 1] range.
    # (fuzzywuzzy ratios are percentages in [0, 100].)
    fuzzy_distances /= 100
    # jellyfish similarities are already in [0, 1].
    jelly_distances = np.array([
        jaro_distance(q1_text, q2_text),
        jaro_winkler(q1_text, q2_text),
    ])
    return np.concatenate([fuzzy_distances, jelly_distances])

# Compute the features for every pair, in parallel batches.
features = kg.jobs.map_batch_parallel(
    tokens,
    item_mapper=get_fuzzy_distances,
    batch_size=1000,
)
# The first len(tokens_train) rows belong to the training set.
X_train = np.array(features[:len(tokens_train)])
X_test = np.array(features[len(tokens_train):])
print('X_train:', X_train.shape)
print('X_test: ', X_test.shape)
"""
Explanation: Build features
End of explanation
"""
# Column names, in the same order as the values returned by
# get_fuzzy_distances above.
feature_names = [
    'fuzz_ratio',
    'fuzz_partial_ratio',
    'fuzz_token_sort_ratio',
    'fuzz_token_set_ratio',
    'fuzz_partial_token_sort_ratio',
    'jaro',
    'jaro_winkler',
]
project.save_features(X_train, X_test, feature_names, feature_list_id)
"""
Explanation: Save features
End of explanation
"""
ComputationalModeling/spring-2017-danielak
past-semesters/spring_2016/day-by-day/day10-random-walks-and-random-numbers/Random_Walks_OLD_SOLUTIONS.ipynb
agpl-3.0
# put your code for Part 1 here. Add extra cells as necessary! %matplotlib inline import matplotlib.pyplot as plt import random import math import numpy as np n_trials = 1000 # number of trials (i.e., number of independent walks) n_steps = 100 # number of steps taken during each trial distances = [] # use this empty list to keep track of distance for each trial this_trial = 0 # loop over trials while this_trial < n_trials: this_distance = 0 # distance gone in this trial - reset to zero each time step=0 # what step this trial is on - reset to zero each time # loop until we get to the right number of steps! while(step < n_steps): # random number is either 0 or 1 (heads or tails) if random.randint(0,1) > 0: this_distance += 1 # go right else: this_distance -= 1 # go left step+=1 # keep track of distances (absolute value) distances.append(abs(this_distance)) # keep track of number of trials this_trial += 1 plt.hist(distances) distances = np.array(distances) # convert to numpy array to make life easier # in 1D, asymptotic solution for mean walk length is (n_steps*2/pi)**(1/2) print("expected mean:", math.sqrt(n_steps*2.0/math.pi), "\nactual mean:", distances.mean(), "\nmin:", distances.min(), "\nmax:", distances.max()) """ Explanation: Random Walks In many situations, it is very useful to think of some sort of process that you wish to model as a succession of random steps. This can describe a wide variety of phenomena - the behavior of the stock market, models of population dynamics in ecosystems, the properties of polymers, the movement of molecules in liquids or gases, modeling neurons in the brain, or in building Google's PageRank search model. This type of modeling is known as a "random walk", and while the process being modeled can vary tremendously, the underlying process is simple. In this exercise, we are going to model such a random walk and learn about some of its behaviors! 
Learning goals: Model a random walk Learn about the behavior of random walks in one and two dimensions Plot both the distribution of random walks and the outcome of a single random walk Group members Put the name of your group members here! Part 1: One-dimensional random walk. Imagine that you draw a line on the floor, with a mark every foot. You start at the middle of the line (the point you have decided is the "origin"). You then flip a "fair" coin N times ("fair" means that it has equal chances of coming up heads and tails). Every time the coin comes up heads, you take one step to the right. Every time it comes up tails, you take a step to the left. Questions: After $N_{flip}$ coin flips and steps, how far are you from the origin? If you repeat this experiment $N_{trial}$ times, what will the distribution of distances from the origin be, and what is the mean distance that you go from the origin? (Note: "distance" means the absolute value of distance from the origin!) First: as a group, come up with a solution to this problem on your whiteboard. Use a flow chart, pseudo-code, diagrams, or anything else that you need to get started. Check with an instructor before you continue! Then: In pairs, write a code in the space provided below to answer these questions! End of explanation """ # put your code for Part 2 here. Add extra cells as necessary! n_trials_2D = 1000 # number of trials (i.e., number of walks) n_steps_2D = 100 # number of steps per trial distances_2D = [] # to keep track of distances of each walk this_trial = 0 # lists for x, y position of *each step* for the last trial. # (This is just for visualization purposes - we want to make a plot.) 
xpos=[] ypos=[] while this_trial < n_trials_2D: x,y = 0.0,0.0 # keep track of walker position for last trial if this_trial == n_trials_2D - 1: xpos.append(x) ypos.append(y) step=0 # go until we reach the number of steps we want to take while(step < n_steps_2D): # pick a random angle between zero and 2*pi theta = random.random()*2.0*math.pi # step x and y in that direction x += math.cos(theta) y += math.sin(theta) # if last trial, keep track of walker positions if this_trial == n_trials_2D - 1: xpos.append(x) ypos.append(y) step+=1 this_distance = (x**2 + y**2)**0.5 #print(this_trial, abs(this_distance)) distances_2D.append(abs(this_distance)) this_trial += 1 plt.hist(distances_2D) distances_2D = np.array(distances_2D) # expect roughly sqrt(n_steps_2D) to be the right answer! print("expected mean:", math.sqrt(n_steps_2D), "\nactual mean:", distances_2D.mean(), "\nmin:", distances_2D.min(), "\nmax:", distances_2D.max()) plt.plot(0.0,0.0,'ro',xpos,ypos,'b-') #plt.xlim(-16,16) #plt.ylim(-16,16) """ Explanation: Part 2: Two-dimensional walk Now, we're going to do the same thing, but in two dimensions, x and y. This time, you will start at the origin, pick a random direction, and take a step one foot in that direction. You will then randomly pick a new direction, take a step, and so on, for a total of $N_{step}$ steps. Questions: After $N_{step}$ random steps, how far are you from the origin? If you repeat this experiment $N_{trial}$ times, what will the distribution of distances from the origin be, and what is the mean distance that you go from the origin? (Note: "distance" means the absolute value of distance from the origin!) Does the mean value differ from Part 1? For one trial, plot out the steps taken in the x-y plane. Does it look random? First: As before, come up with a solution to this problem on your whiteboard as a group. Check with an instructor before you continue! Then: In pairs, write a code in the space provided below to answer these questions! 
End of explanation """ # put your code for Part 3 here. Add extra cells as necessary! n_trials = 100000 n_steps = 100 prob_right = 0.55 distances = [] this_trial = 0 while this_trial < n_trials: this_distance = 0 step=0 while(step < n_steps): if random.random() < prob_right: this_distance += 1 else: this_distance -= 1 step+=1 distances.append(this_distance) this_trial += 1 plt.hist(distances) distances = np.array(distances) print("mean distance:", distances.mean(), "\nabs mean distance:", np.abs(distances).mean()) """ Explanation: Part 3: A different kind of random walk. If you have time, copy and paste your 1D random walk code in the cell below. This time, modify your code so that the coin toss is biased - that you are more likely to take a step in one direction than in the other (i.e., the probability of stepping to the right is $p_{step}$, of stepping to the left is $1-p_{step}$, and $p_{step} \neq 0.5$). How does the distibution of distances gone, as well as the mean distance from the origin, change as $p_{step}$ varies from 0.5? End of explanation """
aidiary/notebooks
keras/170526-airline-passengers.ipynb
mit
%matplotlib inline import pandas import matplotlib.pyplot as plt dataset = pandas.read_csv('data/international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3) plt.plot(dataset) plt.show() dataset """ Explanation: Time Series Prediction with LSTM Recurrent Neural Networks in Python with Keras 時系列データの予測 End of explanation """ import numpy as np import matplotlib.pyplot as plt import pandas import math from keras.models import Sequential from keras.layers import Dense, LSTM from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error # 再現性を担保するために固定したほうがよい np.random.seed(7) # load the dataset dataframe = pandas.read_csv('data/international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3) dataset = dataframe.values type(dataframe), type(dataset) dataset = dataset.astype('float32') dataset.shape """ Explanation: データセットの作成 End of explanation """ # normalize the dataset scaler = MinMaxScaler(feature_range=(0, 1)) dataset = scaler.fit_transform(dataset) dataset[:10] # split into train and test sets train_size = int(len(dataset) * 0.67) test_size = len(dataset) - train_size train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :] print(len(train), len(test)) """ Explanation: 活性化関数にsigmoidやtanhを使うときは入力のスケールに大きな影響をうける 入力は[0, 1]に正規化するとよい scikit-learnのMinMaxScalerが便利 End of explanation """ def create_dataset(dataset, look_back=1): dataX, dataY = [], [] for i in range(len(dataset) - look_back - 1): a = dataset[i:(i + look_back), 0] dataX.append(a) dataY.append(dataset[i + look_back, 0]) return np.array(dataX), np.array(dataY) look_back = 1 trainX, trainY = create_dataset(train, look_back) testX, testY = create_dataset(test, look_back) print(trainX.shape) print(trainY.shape) print(testX.shape) print(testY.shape) """ Explanation: 1つ前の時刻tの値から次の時刻t+1の値を予測するのが課題 (t)と(t+1)の値のペアを訓練データとする Xの方はlook_back(いくつ前まで使うか)によて複数の値があるため2次元配列になる End of explanation """ trainX = np.reshape(trainX, 
(trainX.shape[0], 1, trainX.shape[1])) testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1])) print(trainX.shape) print(testX.shape) """ Explanation: trainXは (samples, features) の配列 LSTMでは入力を (samples, time steps, features) の配列にする必要がある look_backが大きい場合は入力が系列であるがfeaturesと考える? End of explanation """ model = Sequential() # input_shape=(input_length, input_dim) # look_back次元の系列長1のデータが入力、出力は4次元ベクトル # 系列長1なので記憶は使われない? LSTMに入れたらすぐ出てくる model.add(LSTM(4, input_shape=(1, look_back))) model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam') model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2) """ Explanation: LSTMの訓練 LSTMはRNNの一種 BPTT (Backpropagation Through Time) で訓練する LSTMは最近のシーケンスを記憶できる Memory Block を使う Forgate Gate: 記憶から何を捨て去るかを決める Input Gate: 記憶状態を更新するための入力からの値を決める Output Gate: 入力と記憶状態から何を出力するかを決める End of explanation """ trainPredict = model.predict(trainX) testPredict = model.predict(testX) # 出力は正規化されているため元のスケールに戻す trainPredict = scaler.inverse_transform(trainPredict) trainY = scaler.inverse_transform([trainY]) testPredict = scaler.inverse_transform(testPredict) testY = scaler.inverse_transform([testY]) print(trainPredict.shape, trainY.shape) print(testPredict.shape, testY.shape) trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0])) print('Train Score: %.2f RMSE' % (trainScore)) testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0])) print('Test Score: %.2f RMSE' % (testScore)) """ Explanation: 予測 End of explanation """ trainPredictPlot = np.empty_like(dataset) trainPredictPlot[:, :] = np.nan trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict testPredictPlot = np.empty_like(dataset) testPredictPlot[:, :] = np.nan testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict # 元データをプロット(青) plt.plot(scaler.inverse_transform(dataset)) # 訓練内データの予測をプロット(緑) plt.plot(trainPredictPlot) # テストデータの予測をプロット plt.plot(testPredictPlot) """ Explanation: 結果をプロット 
End of explanation """ look_back = 3 trainX, trainY = create_dataset(train, look_back) testX, testY = create_dataset(test, look_back) print(trainX.shape) print(trainY.shape) print(testX.shape) print(testY.shape) trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1])) testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1])) print(trainX.shape) print(testX.shape) """ Explanation: Windows Size 過去の3つ分のデータ(t-2, t-1, t) から次のデータ (t+1) を予測する このやり方では過去のデータを系列ではなく次元長として扱う(次元長は固定) End of explanation """ model = Sequential() # input_shape=(input_length, input_dim) model.add(LSTM(4, input_shape=(1, look_back))) model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam') model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2) # 予測 trainPredict = model.predict(trainX) testPredict = model.predict(testX) # 元のスケールに戻す trainPredict = scaler.inverse_transform(trainPredict) trainY = scaler.inverse_transform([trainY]) testPredict = scaler.inverse_transform(testPredict) testY = scaler.inverse_transform([testY]) trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0])) print('Train Score: %.2f RMSE' % (trainScore)) testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0])) print('Test Score: %.2f RMSE' % (testScore)) """ Explanation: 系列長は1のままで入力次元を3としている End of explanation """ look_back = 3 trainX, trainY = create_dataset(train, look_back) testX, testY = create_dataset(test, look_back) # [samples, time steps, features] # 3次元の系列長1のデータ => 1次元の系列長3のデータ trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1)) testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1)) print(trainX.shape, testX.shape) model = Sequential() # input_shape=(input_length, input_dim) # 入力データの次元が1で系列長がlook_backになった! 
model.add(LSTM(4, input_shape=(look_back, 1))) model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam') model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2) # 予測 trainPredict = model.predict(trainX) testPredict = model.predict(testX) # 元のスケールに戻す trainPredict = scaler.inverse_transform(trainPredict) trainY = scaler.inverse_transform([trainY]) testPredict = scaler.inverse_transform(testPredict) testY = scaler.inverse_transform([testY]) trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0])) print('Train Score: %.2f RMSE' % (trainScore)) testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0])) print('Test Score: %.2f RMSE' % (testScore)) """ Explanation: WindowSize=1より少し改善した! 入力を特徴ではなく系列として扱うアプローチ 過去の観測を別々の入力特徴量として表現するのではなく、入力特徴量の系列として使用することができます これは実際に問題の正確な枠組みになります 入力の系列3の間はLSTMが記憶している? 系列長を変えられる?(0パディング?) End of explanation """ trainPredictPlot = np.empty_like(dataset) trainPredictPlot[:, :] = np.nan trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict testPredictPlot = np.empty_like(dataset) testPredictPlot[:, :] = np.nan testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict # 元データをプロット(青) plt.plot(scaler.inverse_transform(dataset)) # 訓練内データの予測をプロット(緑) plt.plot(trainPredictPlot) # テストデータの予測をプロット plt.plot(testPredictPlot) """ Explanation: 結果はちょっと悪化した・・・ End of explanation """ look_back = 3 trainX, trainY = create_dataset(train, look_back) testX, testY = create_dataset(test, look_back) # [samples, time steps, features] # 3次元の系列長1のデータ => 1次元の系列長3のデータ trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1)) testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1)) batch_size = 1 model = Sequential() model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True)) model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam') #model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2) for i in 
range(100): model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=2, shuffle=False) model.reset_states() # 予測 trainPredict = model.predict(trainX, batch_size=batch_size) testPredict = model.predict(testX, batch_size=batch_size) # 元のスケールに戻す trainPredict = scaler.inverse_transform(trainPredict) trainY = scaler.inverse_transform([trainY]) testPredict = scaler.inverse_transform(testPredict) testY = scaler.inverse_transform([testY]) trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0])) print('Train Score: %.2f RMSE' % (trainScore)) testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0])) print('Test Score: %.2f RMSE' % (testScore)) trainPredictPlot = np.empty_like(dataset) trainPredictPlot[:, :] = np.nan trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict testPredictPlot = np.empty_like(dataset) testPredictPlot[:, :] = np.nan testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict # 元データをプロット(青) plt.plot(scaler.inverse_transform(dataset)) # 訓練内データの予測をプロット(緑) plt.plot(trainPredictPlot) # テストデータの予測をプロット plt.plot(testPredictPlot) """ Explanation: LSTMの内部状態 Kerasのデフォルトでは各訓練バッチで内部状態がリセットされる またpredict()やevaluate()を呼ぶたびにリセットされる statefulにすることで訓練中はずっと内部状態を維持することができる ```python LSTMオブジェクトの作成 model.add(LSTM(4, batch_input_shape=(batch_size, time_steps, features), stateful=True) 訓練ループの書き方 for i in range(100): model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=2, shuffle=False) model.reset_states() 予測の仕方 model.predict(trainX, batch_size=batch_size) ``` LSTM作成時にstateful=Trueを指定する batch_input_shapeでバッチサイズなどの情報も追加する fit時はshuffle=Falseにする 各エポックの最後で明示的にreset_states()する predict時もbatch_sizeを与える End of explanation """ look_back = 3 trainX, trainY = create_dataset(train, look_back) testX, testY = create_dataset(test, look_back) # [samples, time steps, features] # 3次元の系列長1のデータ => 1次元の系列長3のデータ trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1)) testX = 
np.reshape(testX, (testX.shape[0], testX.shape[1], 1)) batch_size = 1 model = Sequential() model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True)) model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True)) model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam') #model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2) for i in range(100): model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=2, shuffle=False) model.reset_states() # 予測 trainPredict = model.predict(trainX, batch_size=batch_size) testPredict = model.predict(testX, batch_size=batch_size) # 元のスケールに戻す trainPredict = scaler.inverse_transform(trainPredict) trainY = scaler.inverse_transform([trainY]) testPredict = scaler.inverse_transform(testPredict) testY = scaler.inverse_transform([testY]) trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0])) print('Train Score: %.2f RMSE' % (trainScore)) testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0])) print('Test Score: %.2f RMSE' % (testScore)) trainPredictPlot = np.empty_like(dataset) trainPredictPlot[:, :] = np.nan trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict testPredictPlot = np.empty_like(dataset) testPredictPlot[:, :] = np.nan testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict # 元データをプロット(青) plt.plot(scaler.inverse_transform(dataset)) # 訓練内データの予測をプロット(緑) plt.plot(trainPredictPlot) # テストデータの予測をプロット plt.plot(testPredictPlot) """ Explanation: Stacked LSTM LSTMを複数つなげることができる 1つ前のLSTMが最終出力ではなく出力の系列を返す必要がある return_sequences=Trueとする python model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True)) model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True)) End of explanation """
TheMitchWorksPro/DataTech_Playground
Python_Misc/TMWP_DFBuilder_OO_PY/testing_and_documentation/TMWP_DFBuilder_GMapsSubClass_Module_Testing.ipynb
mit
# general libraries import pandas as pd ## required for Google Maps API code import os ## for larger data and/or make many requests in one day - get Google API key and use these lines: # os.environ["GOOGLE_API_KEY"] = "YOUR_GOOGLE_API_Key" ## for better security (PROD environments) - install key to server and use just this line to load it: # os.environ.get('GOOGLE_API_KEY') # set up geocode from geopy.geocoders import Nominatim geolocator = Nominatim() from geopy.exc import GeocoderTimedOut import time # note: for now could do this ... used time because it is already in use """ Explanation: <div align="right">Python 3.6</div> Testing The Google Maps Subclass This notebook was created to test objects associated with extracting information into a Dataframe using the Google Maps API. Initially, this was part of an effort to operationalize the interesting bits of code in a messy procedure I did for some research. The original use case was to extract just the address and add it to tables of information with latitude and longitude in them. This notebook may grow to test more related objects as they are developed and/or expansions of the original code. Enrich or Change Larger Dataframe Section by Section The purpose of the <font color=blue><b>DFBuilder</b></font> object is to allow scanning of a larger dataframe, a small number of rows at a time. It then allows code to be customized to make changes and build up a new dataframe from the results. The operation is in a standard loop by design. The original use case was to add a field with data accessed from an API off the web, and time delays were necessary (as well as other logic) to prevent (or at least reduce the risk of) server timeouts during operation. Scanning through the source a few lines at a time, performing the operation and adding back out to the target DF creates a "caching effect" where data is saved along the way so in the event of a server time-out all is not lost. 
The resulting DF can then be saved out to a file and a rerun of <font color=blue><b>buildOutDF()</b></font> should make it possible to pick up where you left off and add in more data (instead of losing everything and having to begin again). The abstract class sets up the core logic and subclasses add in functions to modify the data in different ways and potentially using different APIs. This notebook only tests the subclass designed for the Google Maps API. Libraries Needed Import statements included in this notebook are for the main abstract object and a test object. End of explanation """ ## Test code on a reasonably small DF tst_lat_lon_df = pd.read_csv("testset_unique_lat_and_lon_vals.csv", index_col=0) tst_lat_lon_df.describe() tst_lat_lon_df.tail() """ Explanation: Test Data Input Data Set up Here End of explanation """ # note: gmtime() produced results in Grenwich Mean Time # localtime() seems to get the local time from the computer (in my case EST) from time import localtime, strftime def getNow(): return strftime("%Y-%m-%d %H:%M:%S", localtime()) getNow() from abc import ABCMeta, abstractmethod import pandas as pd class DFBuilder(object, metaclass=ABCMeta): # sets up abstract class '''DataFrame Builder abstract class. Sets up logic to be inherited by objects that need to loop over a DataFrame and cache the results. Original use case involves making API calls to the web which can get interrupted by errors and server timeouts. This object stores all the logic to build up and save a DataFrame a small number of records at a time. Then a subclass can define an abstract method in the base class as to what we want to do to the input data. Original use case added in content extracted form the web to a new column. But subclasses can be built to do more. Initialization argumens: endRw, time_delay. endRw = number of records to cache at a time when building outDF. 
time_delay is number of seconds delay between each cycle of the loop that builds outDF.''' def __init__(self,endRw,time_delay): # abstract classes can be subclassed self.endRow=endRw # but cannot be instantiated self.delay=time_delay self.tmpDF=pd.DataFrame() # temp DF will be endRow rows in length self.outDF=pd.DataFrame() # final DF build in sets of endRow rows so all is not lost in a failure self.lastIndex = None self.statusMsgGrouping = 100 def __str__(self): return ("Global Settings for this object: \n" + "endRow: " + str(self.endRow) + "\n" + "delay: " + str(self.delay) + "\n" + "statusMsgGrouping: " + str(self.statusMsgGrouping) + "\n" "Length of outDF: " + str(len(self.outDF)) + "\n" + "nextIndex: " + str(self.lastIndex)) # if continuing build process with last added table - index of next rec. @abstractmethod # abstract method definition in Python def _modifyTempDF_(): pass # This method will operate on TempDF inside the loop def set_statusMsgGrouping(self, newValue): '''Change number of records used to determine when to provide output messages during buildOutDF(). Default is 100 records. newValue=x sets this to a new number. Note that If endRow is not a factor of statusMsgGrouping output may appear at unexpected intervals. endRow sets the number of rows to cache to outDF in each iteration of the build loop.''' self.statusMsgGrouping = newValue print(self) def set_timeDelay(self, newValue): '''Change number of seconds in time delay between requests while creating outDF(). Default is 1 second. newValue=x sets this to a new number.''' self.delay = newValue print(self) def set_endRow_OutDf_caching(self, newValue): '''Change value of endRow which controls how many rows to cache at a time within buildOutDF(). Default is 5. If something goes wrong and you have to restart the process, this value also represents the maximum number of requests you will lose. The rest will have already been added to outDF. 
newValue=x sets this to a new number.''' self.endRow = newValue print(self) def buildOutDF(self, inputDF): '''Scans inputDF using self.endRow rows (default of 5) at a time to do it. It then calls in logic from _modifyTempDF()_ to make changes to each subset of rows and appends tiny tempDF onto an outDF. When the subclass is using a web API, self.delay tells it how much time to delay each iteration of the loop. Should this function fail in the middle, outDF will have all work up to the failure. This can be saved out to a DF or csv. The function can be run again on a subset of the data (the records not encountered yet before the failure).''' lenDF = len(inputDF) print("Timestamp: ", getNow()) print("Processing inputDF with length of: ", lenDF) print("Please wait ...") endIndx = 0 i = 0 while i < lenDF: # print("i: ", i) endIndx = i + self.endRow if endIndx > lenDF: endIndx = lenDF # print("Range to use: ", i, ":", endIndx) if i % self.statusMsgGrouping == 0: print(getNow(), "Now processing index: ", i) self.tmpDF = inputDF[i:endIndx].copy(deep=True) self._modifyTempDF_() time.sleep(self.delay) self.outDF = self.outDF.append(self.tmpDF) self.lastIndex = endIndx i = endIndx # print("i at end of loop: ", i) self.reindex_OutDF() print("Process complete. %d records added to outDF." %(self.lastIndex)) print("Timestamp: ", getNow()) def reindex_OutDF(self): '''Reindex OutDF using same settings that are used internally for the index during its creation. This is like doing: outDF.reset_index(drop=True, inplace=True).''' self.outDF.reset_index(drop=True, inplace=True) class GMapsLoc_DFBuilder(DFBuilder): '''This class inherits DFBuilder.buildOutDF() which makes use of data extraction and nodification functions in this subclass. endRw sets number of rows to process at a time while building outDF (default=5). time_delay can set the time delay between loop iterations to help prevent licensing issues and related server timeouts. Default is 1 second. 
Initialization arguments: endRw, time_delay, return_null. * endRw controls grouping: process endRow rows at a time and add to outDF (default is 5). * time_delay has default of 1 second and sets how much time to wait each request whild building outDF. * return_null, if False, records error text formatted as "_<errTxt>_" for records that failed to process. Set to True to have it return blank records when errors occur instead (default is False).''' def __init__(self, endRw=5,time_delay=1, return_null=False): super().__init__(endRw,time_delay) self.rtn_null = return_null self.timeout = 10 self.location = "" # stores last location accessed using getGeoAddr def __str__(self): outStr = (super().__str__() + "\n" + "rtn_null: " + str(self.rtn_null) + "\n" + "timeout: " + str(self.timeout) + "\n") if isinstance(self.location, (type(None), str)): outStr = outStr + "location (last obtained): " + str(self.location) else: outStr = outStr + "location (last obtained): " + str(self.location.raw) return outStr def set_ServerTimeout(self, newValue): '''Change number of seconds for the server timeout setting used during web requests. Default is 10 second. newValue=x sets this to a new number.''' self.timeout = newValue print(self) def testConnection(self, lat=48.8588443, lon=2.2943506): '''Test getGeoAddr() function to prove connection to Google Maps is working. Use this ahead of performing much larger operations with Google Maps.''' return self.getGeoAddr(lat, lon) def getGeoAddr(self, lt, lng, timeout=10, test=False, rtn_null=False): '''Make call to Google Maps API to return back just the address from the json location record. Errors should result in text values to help identify why an address was not returned. This can be turned off and records that failed can bring back just an empty field by setting rtn_null to True. timeout = server timeout and has a default that worked well during testing. 
''' try: self.location = geolocator.reverse(str(lt) + ", " + str(lng), timeout=timeout) if test == True: print("===============================") print("Address:\n") print(self.location) print("===============================") rtnVal = self.location else: rtnVal = self.location.address except GeocoderTimedOut as gEoTo: print(type(gEoTo)) print(gEoTo) self.location = None rtnVal = "_" + str(eee).upper().replace(' ', '_').replace(':', '') + "_" ## old error text: "_TIME_OUT_ERROR_ENCOUNTERED_" except Exception as eee: print(type(eee)) print(eee) self.location = None rtnVal = "_" + str(eee).upper().replace(' ', '_').replace(':', '') + "_" finally: # time_delay is not included here and should be incorporated into # the loop that calls this function if desirable if rtn_null==True and self.location is None: return "" else: return rtnVal def _modifyTempDF_(self, test=False): '''Add Address Field to tempDF based on lat, lon (latitude/longitude field values in inputDF)''' self.tmpDF["Address"] = self.tmpDF.apply(lambda x: self.getGeoAddr(lt=x.lat,lng=x.lon, timeout=self.timeout,test=False, rtn_null=self.rtn_null), axis=1) """ Explanation: Code Testing The abstract class which follows is intended to be the "work horse" of this code. Intent is that it gets the developer to the point where all they need to think about is what their final subclass will do to enrich the data. The parent class sets up a loop that can extract from a larger input DF, a small number of rows to be operated on in a temp DF and then be added to an outputDF. In the event of something interrupting the process (a common event when dealing with web APIs), modified rows created before the incident are waiting in output DF and can be extracted. Then code can be restarted or continued to allow building up the rest of the Dataframe without losing previous work or having to go all the way back to the beginnin. 
End of explanation """ ## build main object using the defaults testObj = GMapsLoc_DFBuilder() print(testObj) testObj.buildOutDF(tst_lat_lon_df) ## some tests not shown performed ahead of this run ## errors should be result of exceeding daily record allotment ## for free Google Maps API license ## this code tests basic functioning and default error handling testObj.outDF.head() testObj.outDF.tail() ## this check shows default behavior ## errors recorded in address field so user can find out why a particular location ## failed to return results - in this case "too many requests" (for license allotment) ## errors begin and end with "_" which an address will not. ## a query or filter of the data for addresses starting with "_" can inform the user ## which records need to be run again ## change error handling and a few other default parameters testObj.rtn_null = True ## change error handling: bad records will not simply get blank Address values testObj.set_statusMsgGrouping(10) ## get status message about every 10 records (this will be a small test) testObj.set_timeDelay(0) ## remove time delay (this increases risk of errors) ## note: each set_ function outputs current state of variables ## each output begins with "Global settings ..." ## last one is what these settings look like going into the next test testObj.buildOutDF(tst_lat_lon_df[-25:]) ## redo end of DF .. should be entirely blank since we're out of licenses ## rtn_null = False told code to return empty cell instead of error text ## in production, it may be easier to just search for the nulls to get ## which records to redo, then delete nulls and add in missing records. testObj.outDF.tail() print(testObj) ## final look at settings for this object after process is complete """ Explanation: Testing of The Subclass A different subclass was created in another notebook to test most if not all of the non-web related logic of the Abstract class. 
This means testing in this notebook can focus on the code that produces final results and that interacts with the Google Maps API. This section shows how the code can build up outDF() adding addresses obtained from the Google Maps API to the latitude and longitude provided in the input data. Tests show how errors are handled, both as text of the form "<errorTxt>" in the address field, or as empty strings if you set rtn_null to True. Tests also show how data can be added to outDF by re-running the build function. This allows adding of additional data to outDF, or of adding in data that was missed due to server timeout errors or other interruptions to the web process. Test Main Logic with Error Handling Exposed These tests were designed to show the error handling in action. For the sake of brevity, earlier testing was deleted to just show later tests in which errors are expected (due to exceeding daily license allotment from Google). End of explanation """ ### quick clean test with fresh alotment of license records for the day # illustrates adding more in later and a run with no errors in it # * do 600 initially # * then add in the end of DF testObj = GMapsLoc_DFBuilder() print(testObj) testObj.buildOutDF(tst_lat_lon_df[0:600]) testObj.buildOutDF(tst_lat_lon_df[600:]) ## end of the df added in ## in this text, indicies between input/output will match ## since every record was added in using the same sequence tst_lat_lon_df.tail() ## final records in the input testObj.outDF.tail() ## final records in the output """ Explanation: Test Main Logic - Fresh Alotment of Licenses (No Errors Expected) Note that an error could still occur due to a server timeout, a server being down (on the Google site) or some other unexpected event. This test was set up to maximize the likelihood of showing what output can look like when no errors occur. 
Since at least one error seems to occur in batches of 900 or more, data is split in half with the second half added in after the first for the test set. End of explanation """ ## do test with testObjDocs - resetting it to blank to start fresh testObjDocs = GMapsLoc_DFBuilder() print(testObjDocs) testObjDocs.buildOutDF(tst_lat_lon_df) testObjDocs.outDF[975:1000] ## spot check run on batches of 25 records to find the bad ones (b/2 900 and 1000) ## bad records found here len(testObjDocs.outDF) # current length of DF ## our first run: index of input will be same as index on output ## test showing that records on input match the problem range in output tst_lat_lon_df[985:990] testObjDocs.buildOutDF(tst_lat_lon_df[985:990]) testObjDocs.outDF.tail(10) ## new records on the end ... still need to delete the bad ones testObjDocs.outDF.drop(testObjDocs.outDF.index[985:990], inplace=True) testObjDocs.outDF[984:991] ## as expected - bad rows dropped but we now have an indexing issue testObjDocs.outDF.tail() ## fix index: testObjDocs.reindex_OutDF() testObjDocs.outDF[984:991] testObjDocs.outDF.tail() ## note: all records are in here now but indices will be different from input DF """ Explanation: Experiment in Cleaning Up Results This test was run with a fresh alotment of google license records for the day. It should have completed without error but the server went down causing 5 error records instead. This test shows what to do in this scenario. End of explanation """ gMapAddrDat = GMapsLoc_DFBuilder(endRw=4, time_delay=3, return_null=True) gMapAddrDat.set_statusMsgGrouping(12) gMapAddrDat.set_endRow_OutDf_caching(3) gMapAddrDat.set_timeDelay(0) gMapAddrDat.set_ServerTimeout(9) gMapAddrDat.buildOutDF(tst_lat_lon_df[0:50]) print(gMapAddrDat) gMapAddrDat.location = "" print(gMapAddrDat) """ Explanation: Testing of Enhanced Print() and set_ Functions Now has logic to handle output in different way. 
We get to see location.raw if possible, and it knows what to do if location is None or an empty string. Also testing build parameters for first time and new set_ functions. End of explanation """ gMapAddrDat.testConnection() ## uses default test record to just ensure connection is working gMapAddrDat.getGeoAddr(40.699100, -73.703697, test=True) ## function called by buildOutDF() ## use test mode to obtain more information tstLoc1 = gMapAddrDat.getGeoAddr(40.699100, -73.703697, test=True) ## use .raw on output during testing print(type(tstLoc1)) ## to view JSON structure of Location obj tstLoc1.raw tstLoc1 = gMapAddrDat.getGeoAddr(40.699100, -73.703697) ## default: test=False print(type(tstLoc1)) ## when called internally to build the address field for tstLoc1 ## for outDF, it just returns an address string """ Explanation: Test of Other Internal Functions These functions can be used in testing or to just get back a single value. These examples might prove useful. End of explanation """ # create new object to test the docstrings and some more quick coding tweaks testObjDocs = GMapsLoc_DFBuilder() print(testObjDocs) help(testObjDocs) print(testObjDocs.__doc__) # note: formatting is messed up if you do not use print() on the doc string print(testObjDocs.buildOutDF.__doc__) # buildOutDF help(DFBuilder) """ Explanation: Documentation Tests End of explanation """
thalesians/tsa
src/jupyter/python/signatures.ipynb
apache-2.0
import os, sys sys.path.append(os.path.abspath('../../main/python')) import datetime as dt import numpy as np import pandas as pd import thalesians.tsa.signatures as signatures import importlib importlib.reload(signatures) """ Explanation: Time series signatures End of explanation """ df = pd.DataFrame( np.array(((1.,1.),(3.,4.),(5.,2.),(8.,6.))), columns=('A', 'B')) df """ Explanation: Basic example This is the data from the example in [CK16], Section 2.1.1, equations (2.1)-(2.3): End of explanation """ signatures.signature(df) """ Explanation: This reproduces equation (2.11) in [CK16]: End of explanation """ df = pd.DataFrame( np.array([[1.], [3.], [5.], [9.], [10.]]), columns=['A']) signatures.signature(df, max_word_length=5) df = pd.DataFrame( np.array([[1.], [3.], [2.], [-9.], [10.]]), columns=['A']) signatures.signature(df, max_word_length=5) """ Explanation: (An even simpler) variation on the basic example End of explanation """ df = pd.DataFrame([ [63.62, 41.48, 177.14, 102.22, 105.35, 759.44, 33.31, 13.97], [63.73, 41.68, 174.09, 102.73, 102.71, 761.53, 32.43, 13.72], [62.81, 41.05, 169.84, 102.97, 100.70, 759.33, 31.27, 13.11], [60.27, 40.27, 164.62, 97.92, 96.45, 741.00, 29.99, 12.70], [58.92, 39.70, 163.94, 97.33, 96.96, 730.91, 29.53, 12.54], [58.83, 39.86, 165.73, 97.51, 98.53, 733.07, 30.25, 12.77], [58.96, 40.62, 165.71, 99.37, 99.96, 745.34, 30.30, 12.85], [57.34, 39.01, 158.99, 95.44, 97.39, 719.57, 30.49, 12.20], [58.20, 39.93, 161.39, 98.37, 99.52, 731.39, 30.30, 12.19], [57.04, 39.05, 155.61, 94.97, 97.13, 710.49, 29.57, 11.97], [57.01, 39.13, 156.82, 95.26, 96.66, 719.08, 29.40, 11.95], [55.51, 38.87, 153.75, 94.35, 96.79, 718.56, 29.42, 11.90], [55.25, 38.52, 151.65, 94.16, 96.30, 726.67, 29.55, 12.01], [56.95, 39.37, 156.86, 97.94, 101.42, 745.46, 29.28, 12.14], [55.66, 38.29, 151.12, 97.01, 99.44, 733.62, 29.23, 11.98], [57.08, 39.02, 154.45, 97.34, 99.99, 733.79, 29.65, 12.26], [57.04, 39.18, 153.72, 94.45, 93.42, 717.58, 29.21, 11.85], 
[57.28, 39.22, 157.06, 109.11, 94.09, 748.30, 29.02, 11.71], [59.50, 40.06, 161.56, 112.21, 97.34, 761.35, 29.64, 11.94], [58.86, 39.80, 159.65, 115.09, 96.43, 770.77, 30.11, 12.07], [57.03, 38.75, 151.70, 114.61, 94.48, 780.91, 29.65, 11.51], [57.41, 39.28, 152.68, 112.69, 96.35, 749.38, 28.92, 11.46], [58.40, 40.11, 156.49, 110.49, 96.60, 730.03, 28.64, 11.53], [57.75, 40.09, 156.47, 104.07, 94.02, 703.76, 28.54, 11.45], [56.54, 39.53, 149.25, 99.75, 95.01, 704.16, 28.68, 11.59]], index=[ dt.datetime(2016, 1, 4), dt.datetime(2016, 1, 5), dt.datetime(2016, 1, 6), dt.datetime(2016, 1, 7), dt.datetime(2016, 1, 8), dt.datetime(2016, 1, 11), dt.datetime(2016, 1, 12), dt.datetime(2016, 1, 13), dt.datetime(2016, 1, 14), dt.datetime(2016, 1, 15), dt.datetime(2016, 1, 19), dt.datetime(2016, 1, 20), dt.datetime(2016, 1, 21), dt.datetime(2016, 1, 22), dt.datetime(2016, 1, 25), dt.datetime(2016, 1, 26), dt.datetime(2016, 1, 27), dt.datetime(2016, 1, 28), dt.datetime(2016, 1, 29), dt.datetime(2016, 2, 1), dt.datetime(2016, 2, 2), dt.datetime(2016, 2, 3), dt.datetime(2016, 2, 4), dt.datetime(2016, 2, 5), dt.datetime(2016, 2, 8)], columns=[ 'JPM UN Equity', 'USB UN Equity', 'GS UN Equity', 'FB UW Equity', 'AAPL UW Equity', 'GOOGL UW Equity', 'GM UN Equity', 'F UN Equity']) df signatures.signature(df, max_word_length=2) signatures.signature(df[['JPM UN Equity','USB UN Equity']], max_word_length=2) signatures.signature(df[['JPM UN Equity']]) """ Explanation: Slightly more involved End of explanation """
albahnsen/ML_RiskManagement
exercises/04-CreditScoring.ipynb
mit
import pandas as pd pd.set_option('display.max_columns', 500) import zipfile with zipfile.ZipFile('../datasets/KaggleCredit2.csv.zip', 'r') as z: f = z.open('KaggleCredit2.csv') data = pd.read_csv(f, index_col=0) data.head() data.shape """ Explanation: Exercise 04 Logistic regression for credit scoring Banks play a crucial role in market economies. They decide who can get finance and on what terms and can make or break investment decisions. For markets and society to function, individuals and companies need access to credit. Credit scoring algorithms, which make a guess at the probability of default, are the method banks use to determine whether or not a loan should be granted. This competition requires participants to improve on the state of the art in credit scoring, by predicting the probability that somebody will experience financial distress in the next two years. Dataset Attribute Information: |Variable Name | Description | Type| |----|----|----| |SeriousDlqin2yrs | Person experienced 90 days past due delinquency or worse | Y/N| |RevolvingUtilizationOfUnsecuredLines | Total balance on credit divided by the sum of credit limits | percentage| |age | Age of borrower in years | integer| |NumberOfTime30-59DaysPastDueNotWorse | Number of times borrower has been 30-59 days past due | integer| |DebtRatio | Monthly debt payments | percentage| |MonthlyIncome | Monthly income | real| |NumberOfOpenCreditLinesAndLoans | Number of Open loans | integer| |NumberOfTimes90DaysLate | Number of times borrower has been 90 days or more past due. 
| integer| |NumberRealEstateLoansOrLines | Number of mortgage and real estate loans | integer| |NumberOfTime60-89DaysPastDueNotWorse | Number of times borrower has been 60-89 days past due |integer| |NumberOfDependents | Number of dependents in family | integer| Read the data into Pandas End of explanation """ data.isnull().sum(axis=0) data.dropna(inplace=True) data.shape """ Explanation: Drop na End of explanation """ y = data['SeriousDlqin2yrs'] X = data.drop('SeriousDlqin2yrs', axis=1) y.mean() """ Explanation: Create X and y End of explanation """
AllenDowney/ModSimPy
soln/chap21soln.ipynb
mit
# Configure Jupyter so figures appear in the notebook %matplotlib inline # Configure Jupyter to display the assigned value after an assignment %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' # import functions from the modsim.py module from modsim import * """ Explanation: Modeling and Simulation in Python Chapter 21 Copyright 2017 Allen Downey License: Creative Commons Attribution 4.0 International End of explanation """ m = UNITS.meter s = UNITS.second kg = UNITS.kilogram """ Explanation: With air resistance Next we'll add air resistance using the drag equation I'll start by getting the units we'll need from Pint. End of explanation """ params = Params(height = 381 * m, v_init = 0 * m / s, g = 9.8 * m/s**2, mass = 2.5e-3 * kg, diameter = 19e-3 * m, rho = 1.2 * kg/m**3, v_term = 18 * m / s) """ Explanation: Now I'll create a Params object to contain the quantities we need. Using a Params object is convenient for grouping the system parameters in a way that's easy to read (and double-check). End of explanation """ def make_system(params): """Makes a System object for the given conditions. params: Params object returns: System object """ diameter, mass = params.diameter, params.mass g, rho = params.g, params.rho, v_init, v_term = params.v_init, params.v_term height = params.height area = np.pi * (diameter/2)**2 C_d = 2 * mass * g / (rho * area * v_term**2) init = State(y=height, v=v_init) t_end = 30 * s dt = t_end / 100 return System(params, area=area, C_d=C_d, init=init, t_end=t_end, dt=dt) """ Explanation: Now we can pass the Params object make_system which computes some additional parameters and defines init. make_system uses the given radius to compute area and the given v_term to compute the drag coefficient C_d. End of explanation """ system = make_system(params) """ Explanation: Let's make a System End of explanation """ def slope_func(state, t, system): """Compute derivatives of the state. 
state: position, velocity t: time system: System object returns: derivatives of y and v """ y, v = state rho, C_d, g = system.rho, system.C_d, system.g area, mass = system.area, system.mass f_drag = rho * v**2 * C_d * area / 2 a_drag = f_drag / mass dydt = v dvdt = -g + a_drag return dydt, dvdt """ Explanation: Here's the slope function, including acceleration due to gravity and drag. End of explanation """ slope_func(system.init, 0, system) """ Explanation: As always, let's test the slope function with the initial conditions. End of explanation """ def event_func(state, t, system): """Return the height of the penny above the sidewalk. """ y, v = state return y """ Explanation: We can use the same event function as in the previous chapter. End of explanation """ results, details = run_ode_solver(system, slope_func, events=event_func) details """ Explanation: And then run the simulation. End of explanation """ results.head() results.tail() """ Explanation: Here are the results. End of explanation """ t_sidewalk = get_last_label(results) * s """ Explanation: The final height is close to 0, as expected. Interestingly, the final velocity is not exactly terminal velocity, which suggests that there are some numerical errors. We can get the flight time from results. End of explanation """ def plot_position(results): plot(results.y) decorate(xlabel='Time (s)', ylabel='Position (m)') plot_position(results) savefig('figs/chap21-fig01.pdf') """ Explanation: Here's the plot of position as a function of time. 
End of explanation
"""

def plot_velocity(results):
    plot(results.v, color='C1', label='v')
        
    decorate(xlabel='Time (s)',
             ylabel='Velocity (m/s)')
 
plot_velocity(results)

"""
Explanation: And velocity as a function of time:
End of explanation
"""

# Solution

v_init = -30 * m / s
params2 = Params(params, v_init=v_init)

# Solution

system2 = make_system(params2)
results, details = run_ode_solver(system2, slope_func, events=event_func, max_step=0.5)
details.message

plot_position(results)

# Solution

plot_velocity(results)

"""
Explanation: From an initial velocity of 0, the penny accelerates downward until it reaches terminal velocity; after that, velocity is constant.
Exercise: Run the simulation with an initial velocity, downward, that exceeds the penny's terminal velocity.  Hint: You can create a new Params object based on an existing one, like this:
params2 = Params(params, v_init=-30 * m/s)
What do you expect to happen?  Plot velocity and position as a function of time, and see if they are consistent with your prediction.
End of explanation
"""

# Solution

# Here's a `Params` object with the dimensions of a quarter, 
# the observed flight time and our initial guess for `v_term`

params3 = Params(params, mass = 5.67e-3 * kg,
                         diameter = 24.26e-3 * m,
                         v_term = 18 * m / s,
                         flight_time = 19.1 * s)

# Solution

# Now we can make a `System` object

system3 = make_system(params3)

# Solution

# Run the simulation

results, details = run_ode_solver(system3, slope_func, events=event_func)
details

# Solution

# And get the flight time

flight_time = get_last_label(results) * s

# Solution

# The flight time is a little long, so we could increase `v_term` and try again.
# Or we could write an error function

def error_func(guess, params):
    """Flight-time error as a function of the guessed terminal velocity.
    
    guess: guess at v_term
    params: Params object (must carry the observed flight_time)
    
    returns: difference between simulated and observed flight time, in s
    """
    # show each guess so we can watch the root-finder converge
    print(guess)
    params = Params(params, v_term=guess)
    system = make_system(params)
    results, details = run_ode_solver(system, slope_func, events=event_func)
    flight_time = get_last_label(results) * s
    error = flight_time - params.flight_time
    return magnitude(error)

# Solution

# We can test the error function like this

v_guess1 = 18 * m / s
error_func(v_guess1, params3)

# Solution

v_guess2 = 22 * m / s
error_func(v_guess2, params3)

# Solution

# Now we can use `root_bisect` to find the value of `v_term` that yields the measured flight time.

res = root_bisect(error_func, [v_guess1, v_guess2], params3)

# Solution

v_term_solution = res.root

# Solution

# Plugging in the estimated value, we can use `make_system` to compute `C_d`

params_solution = Params(params3, v_term=v_term_solution)
system = make_system(params_solution)
system.C_d

"""
Explanation: Exercise: Suppose we drop a quarter from the Empire State Building and find that its flight time is 19.1 seconds.  Use this measurement to estimate the terminal velocity.

You can get the relevant dimensions of a quarter from https://en.wikipedia.org/wiki/Quarter_(United_States_coin).
Create a Params object with the system parameters. We don't know v_term, so we'll start with the inital guess v_term = 18 * m / s.
Use make_system to create a System object.
Call run_ode_solver to simulate the system. How does the flight time of the simulation compare to the measurement?
Try a few different values of t_term and see if you can get the simulated flight time close to 19.1 seconds.
Optionally, write an error function and use root_scalar to improve your estimate.
Use your best estimate of v_term to compute C_d.

Note: I fabricated the observed flight time, so don't take the results of this exercise too seriously.
End of explanation
"""
clausherther/public
Rethinking - Andrew's Spinner.ipynb
cc0-1.0
data = pd.DataFrame([18, 19, 22, float(np.nan), float(np.nan), 19, 20, 22], columns=["frequency"]) k = len(data) p = 1/k k, p data missing_indeces = np.argwhere(np.isnan(data["frequency"])).flatten() missing_indeces """ Explanation: This notebook was inspired by this homework problem post by Richard McElreath: https://twitter.com/rlmcelreath/status/1177554702235525122 And influenced by Baze Petrushev's first take on a solution: https://github.com/petrushev/bayesian-modeling/blob/andrews-spinner/andrews-spinner/andrews_spinner.ipynb We set up the dataframe with the missing values as np.nan End of explanation """ with pm.Model() as model_frequency: α = pm.Exponential("α", 1) β = pm.Exponential("β", .1) λ = pm.Gamma("λ", α, β) spins = pm.Poisson("spins", mu=λ, observed=data["frequency"]) # We create an imputed dataset by filling in the missing variables with our new random variables data_obs_imputed = list(data["frequency"]) for i, m in enumerate(missing_indeces): data_obs_imputed[m] = spins[i] data_obs_imputed = pm.Deterministic("data_obs_imputed", pm.math.stack(data_obs_imputed)) # We use the new total number of spins (including the posteriors of the missing variables) total_spins_imputed = pm.Deterministic("total_spins_imputed", pm.math.sum(data_obs_imputed)) a = np.ones(k)/k theta = pm.Dirichlet("theta", a=a) successes = pm.Multinomial("successes", n=total_spins_imputed, p=theta, observed=data_obs_imputed) model_frequency with model_frequency: trace_frequency = pm.sample(20000, tune=3000, chains=3, random_seed=SEED) with model_frequency: pm.traceplot(trace_frequency, var_names=["spins_missing"], divergences=False, combined=True); with model_frequency: pm.traceplot(trace_frequency, var_names=["theta"], divergences=False, compact=True, combined=True); data["probability"] = trace_frequency["theta"].mean(axis=0) data sns.barplot(x=np.arange(1, k+1), y=np.median(trace_frequency["data_obs_imputed"], axis=0)); az.plot_forest(trace_frequency, var_names=["theta"], 
kind="ridgeplot", combined=True); _, axes = plt.subplots(4, 2, figsize=(6*2, 4*4)) axes = axes.flatten() for i in range(k): pm.plot_posterior(trace_frequency["theta"][:, i], ax=axes[i], round_to=4, ref_val=1/k) axes[i].set_title(f"P({i+1})") g = sns.jointplot(trace_frequency["theta"][:, 3], trace_frequency["theta"][:, 4], kind="hex", color="k") g.ax_joint.axhline(1/k, linestyle="dotted", linewidth=2, color="k") g.ax_joint.axvline(1/k, linestyle="dotted", linewidth=2, color="k") g.set_axis_labels("theta - missing 4", "theta - missing 5"); """ Explanation: Relying on PyMC3's handling of missing values, we pass the observed data with missing values to a Poisson random variable. PyMC3 creates new random variables for each missing data point, which we can use downstream: End of explanation """
benneely/qdact-basic-analysis
notebooks/comppheno.ipynb
gpl-3.0
import pickle import re dd = pickle.load(open('./python_scripts/02_data_dictionary_dict.p','rb')) #get all variables that begin with 'ESAS' and print variables = list(dd.keys()) variables.sort() pattern = r'\b' + re.escape('ESAS') symptoms = [variables[i] for i, x in enumerate(variables) if re.search(pattern, x)] print(symptoms) """ Explanation: Computable Phenotypes utilizing computers to map observed data to structured output representing a state, condition, or disease status <img src="images/comppheno.jpeg" alt="Smiley face" height="200" width="300" align="left"> Computable Phenotype Description In most research scenarios, the transformation from raw data to some sort of structured output must take place. Much like a clinician listening to a patients symptoms and ascribing a condition based on their observations, a computable phenotype is an algorithm that takes observed raw data and returns a structured response. Provinding a resuable interface to this algorithm helps with reproducibility, dissemination, and accuracy. This notebook will serve as a centralized location to observe and interact with all the computable phenotypes developed as part of this project. As part of this package we will developy a python module qdact_computable_phenotypes in order to disseminate these decision points as best possible. Table of Contents Moderate / Severe Symptoms Set-to-missing Incident Palliative Care <a id='modsevsymp'></a> Moderate / Severe Symptoms In the QDACT data, symptoms are collected and stored as variables whose names begin with 'ESAS'. 
To see a comprehensive list of variable names for symptoms consider the following code: End of explanation """ from robocomp import Model import numpy as np #Given an input of patient reported symptoms (0-10), report whether the symptom is 'mild' or 'moderate/severe' mod_sev = Model('mod_sev_symptoms') from IPython.display import Image from sklearn.externals.six import StringIO from sklearn import tree import pydotplus dot_data = StringIO() tree.export_graphviz(mod_sev.model, out_file=dot_data, feature_names=['0-10 scaling'], filled=True, rounded=True, special_characters=True) graph = pydotplus.graph_from_dot_data(dot_data.getvalue()) Image(graph.create_png()) inn = [2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 994.0, 0.0, 994.0, 0.0, np.float('nan'), 994.0] mod_sev.scoreIt(inn) """ Explanation: For all of these variables (minus ESASOtherProblem and ESASScore - we should discuss these variables), there is a need to map the raw data (i.e. 0-10, 994,997,999) to structured output that represents a binary indicator: 1. Moderate-to-Severe Symptoms (Yes) 2. No Moderate-to_Severe Symptoms (No) 3. Missing (None) Because this algorithm will be reused and the way data will be attributed to the missing type (None) it is important that we are as transparent and visual about the way in which our algorithm will work End of explanation """ #Given an input of patient reported symptoms (0-10), report whether the symptom is 'mild' or 'moderate/severe' missings = Model('set_missing_codes') ESASAnxiety = [0,5,7,2,8,np.nan,'test',994] missings.scoreIt(ESASAnxiety) """ Explanation: <a id='set-to-missing'></a> Set-to-missing *Often, QDACT provides users with responses that are not useful for analysis. This algorithm will be used to help clean the data first to ensure that all non-useful responses are removed. One example is with the response 'Patient unable to respond' - in the data set, this response is stored as 994. 
Because we will treat this variable as numeric, we need to set this (and all answers like it) to missing. Example In the following example, I will show how this algorithm will operate on the ESASAnxiety variable. End of explanation """
dirkseidensticker/CARD
Python/aDRACtoOxCal.ipynb
mit
%matplotlib inline from IPython.display import display import pandas as pd """ Explanation: Conversion to OxCal-compliant output Archives des datations radiocarbone d'Afrique centrale Dirk Seidensticker see: https://c14.arch.ox.ac.uk/embed.php?File=oxcal.html End of explanation """ df = pd.read_csv("https://raw.githubusercontent.com/dirkseidensticker/aDRAC/master/data/aDRAC.csv", encoding='utf8') display(df.head()) """ Explanation: Conversion of the Data into OxCal-usable Form End of explanation """ df_sub = df.head() """ Explanation: Choosing only the first five entries as subsample: End of explanation """ print('''Plot() {''') for index, row in df_sub.iterrows(): print('R_Date("', row['SITE'],'/', row['FEATURE'], '-', row['LABNR'],'",', row['C14AGE'],',', row['C14STD'],');') print('};') """ Explanation: OxCal-compliant output: End of explanation """
nehal96/Deep-Learning-ND-Exercises
DCGAN/DCGAN.ipynb
mit
%matplotlib inline import pickle as pkl import matplotlib.pyplot as plt import numpy as np from scipy.io import loadmat import tensorflow as tf !mkdir data """ Explanation: Deep Convolutional GANs In this notebook, you'll build a GAN using convolutional layers in the generator and discriminator. This is called a Deep Convolutional GAN, or DCGAN for short. The DCGAN architecture was first explored last year and has seen impressive results in generating new images, you can read the original paper here. You'll be training DCGAN on the Street View House Numbers (SVHN) dataset. These are color images of house numbers collected from Google street view. SVHN images are in color and much more variable than MNIST. So, we'll need a deeper and more powerful network. This is accomplished through using convolutional layers in the discriminator and generator. It's also necessary to use batch normalization to get the convolutional networks to train. The only real changes compared to what you saw previously are in the generator and discriminator, otherwise the rest of the implementation is the same. 
End of explanation """ from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm data_dir = 'data/' if not isdir(data_dir): raise Exception("Data directory doesn't exist!") class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile(data_dir + "train_32x32.mat"): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Training Set') as pbar: urlretrieve( 'http://ufldl.stanford.edu/housenumbers/train_32x32.mat', data_dir + 'train_32x32.mat', pbar.hook) if not isfile(data_dir + "test_32x32.mat"): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Training Set') as pbar: urlretrieve( 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat', data_dir + 'test_32x32.mat', pbar.hook) """ Explanation: Getting the data Here you can download the SVHN dataset. Run the cell above and it'll download to your machine. End of explanation """ trainset = loadmat(data_dir + 'train_32x32.mat') testset = loadmat(data_dir + 'test_32x32.mat') """ Explanation: These SVHN files are .mat files typically used with Matlab. However, we can load them in with scipy.io.loadmat which we imported above. End of explanation """ idx = np.random.randint(0, trainset['X'].shape[3], size=36) fig, axes = plt.subplots(6, 6, sharex=True, sharey=True, figsize=(5,5),) for ii, ax in zip(idx, axes.flatten()): ax.imshow(trainset['X'][:,:,:,ii], aspect='equal') ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) plt.subplots_adjust(wspace=0, hspace=0) """ Explanation: Here I'm showing a small sample of the images. Each of these is 32x32 with 3 color channels (RGB). These are the real images we'll pass to the discriminator and what the generator will eventually fake. 
End of explanation """ def scale(x, feature_range=(-1, 1)): # scale to (0, 1) x = ((x - x.min())/(255 - x.min())) # scale to feature_range min, max = feature_range x = x * (max - min) + min return x class Dataset: def __init__(self, train, test, val_frac=0.5, shuffle=False, scale_func=None): split_idx = int(len(test['y'])*(1 - val_frac)) self.test_x, self.valid_x = test['X'][:,:,:,:split_idx], test['X'][:,:,:,split_idx:] self.test_y, self.valid_y = test['y'][:split_idx], test['y'][split_idx:] self.train_x, self.train_y = train['X'], train['y'] self.train_x = np.rollaxis(self.train_x, 3) self.valid_x = np.rollaxis(self.valid_x, 3) self.test_x = np.rollaxis(self.test_x, 3) if scale_func is None: self.scaler = scale else: self.scaler = scale_func self.shuffle = shuffle def batches(self, batch_size): if self.shuffle: idx = np.arange(len(dataset.train_x)) np.random.shuffle(idx) self.train_x = self.train_x[idx] self.train_y = self.train_y[idx] n_batches = len(self.train_y)//batch_size for ii in range(0, len(self.train_y), batch_size): x = self.train_x[ii:ii+batch_size] y = self.train_y[ii:ii+batch_size] yield self.scaler(x), self.scaler(y) """ Explanation: Here we need to do a bit of preprocessing and getting the images into a form where we can pass batches to the network. First off, we need to rescale the images to a range of -1 to 1, since the output of our generator is also in that range. We also have a set of test and validation images which could be used if we're trying to identify the numbers in the images. End of explanation """ def model_inputs(real_dim, z_dim): inputs_real = tf.placeholder(tf.float32, (None, *real_dim), name='input_real') inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z') return inputs_real, inputs_z """ Explanation: Network Inputs Here, just creating some placeholders like normal. 
End of explanation
"""

def generator(z, output_dim, reuse=False, alpha=0.2, training=True):
    """Build the DCGAN generator: latent z -> 32x32x`output_dim` image in (-1, 1).

    z: latent input tensor, shape (batch, z_dim)
    output_dim: number of channels in the generated image (3 for RGB)
    reuse: reuse the 'generator' variable scope (True at sampling time)
    alpha: leaky-ReLU slope
    training: passed to batch norm; set False at inference so population
              statistics are used instead of batch statistics
    """
    with tf.variable_scope('generator', reuse=reuse):
        # First fully connected layer
        x1 = tf.layers.dense(z, 4*4*512)
        # Reshape it to start the convolutional stack
        x1 = tf.reshape(x1, (-1, 4, 4, 512))
        x1 = tf.layers.batch_normalization(x1, training=training)
        x1 = tf.maximum(alpha * x1, x1)
        # 4x4x512 now
        
        x2 = tf.layers.conv2d_transpose(x1, 256, 5, strides=2, padding='same')
        x2 = tf.layers.batch_normalization(x2, training=training)
        x2 = tf.maximum(alpha * x2, x2)
        # 8x8x256 now
        
        x3 = tf.layers.conv2d_transpose(x2, 128, 5, strides=2, padding='same')
        x3 = tf.layers.batch_normalization(x3, training=training)
        x3 = tf.maximum(alpha * x3, x3)
        # 16x16x128 now
        
        # Output layer (no batch norm; tanh maps logits into (-1, 1))
        logits = tf.layers.conv2d_transpose(x3, output_dim, 5, strides=2, padding='same')
        # 32x32x3 now
        
        out = tf.tanh(logits)
        
        return out

"""
Explanation: Generator
Here you'll build the generator network. The input will be our noise vector z as before. Also as before, the output will be a $tanh$ output, but this time with size 32x32 which is the size of our SVHN images.
What's new here is we'll use convolutional layers to create our new images. The first layer is a fully connected layer which is reshaped into a deep and narrow layer, something like 4x4x1024 as in the original DCGAN paper. Then we use batch normalization and a leaky ReLU activation. Next is a transposed convolution where typically you'd halve the depth and double the width and height of the previous layer. Again, we use batch normalization and leaky ReLU. For each of these layers, the general scheme is convolution > batch norm > leaky ReLU.
You keep stack layers up like this until you get the final transposed convolution layer with shape 32x32x3. Below is the archicture used in the original DCGAN paper:

Note that the final layer here is 64x64x3, while for our SVHN dataset, we only want it to be 32x32x3. 
End of explanation
"""

def discriminator(x, reuse=False, alpha=0.2):
    """Build the DCGAN discriminator: 32x32x3 image -> (sigmoid prob, logits).

    x: input image tensor scaled to (-1, 1)
    reuse: reuse the 'discriminator' variable scope (True for the fake pass)
    alpha: leaky-ReLU slope
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        # Input layer is 32x32x3
        x1 = tf.layers.conv2d(x, 64, 5, strides=2, padding='same')
        relu1 = tf.maximum(alpha * x1, x1)
        # 16x16x64 (comment fixed: the first conv has 64 filters, not 32)
        
        x2 = tf.layers.conv2d(relu1, 128, 5, strides=2, padding='same')
        bn2 = tf.layers.batch_normalization(x2, training=True)
        relu2 = tf.maximum(alpha * bn2, bn2)
        # 8x8x128
        
        x3 = tf.layers.conv2d(relu2, 256, 5, strides=2, padding='same')
        bn3 = tf.layers.batch_normalization(x3, training=True)
        relu3 = tf.maximum(alpha * bn3, bn3)
        # 4x4x256

        # Flatten it
        flat = tf.reshape(relu3, (-1, 4*4*256))
        logits = tf.layers.dense(flat, 1)
        out = tf.sigmoid(logits)
        
        return out, logits

"""
Explanation: Discriminator
Here you'll build the discriminator. This is basically just a convolutional classifier like you've build before. The input to the discriminator are 32x32x3 tensors/images. You'll want a few convolutional layers, then a fully connected layer for the output. As before, we want a sigmoid output, and you'll need to return the logits as well. For the depths of the convolutional layers I suggest starting with 16, 32, 64 filters in the first layer, then double the depth as you add layers. Note that in the DCGAN paper, they did all the downsampling using only strided convolutional layers with no maxpool layers.
You'll also want to use batch normalization with tf.layers.batch_normalization on each layer except the first convolutional and output layers. Again, each layer should look something like convolution > batch norm > leaky ReLU.
Note: in this project, your batch normalization layers will always use batch statistics. (That is, always set training to True.) That's because we are only interested in using the discriminator to help train the generator. However, if you wanted to use the discriminator for inference later, then you would need to set the training parameter appropriately. 
End of explanation
"""

def model_loss(input_real, input_z, output_dim, alpha=0.2):
    """
    Get the loss for the discriminator and generator
    :param input_real: Images from the real dataset
    :param input_z: Z input
    :param output_dim: The number of channels in the output image
    :param alpha: Leaky-ReLU slope passed through to both networks
    :return: A tuple of (discriminator loss, generator loss)
    """
    g_model = generator(input_z, output_dim, alpha=alpha)
    # Real and fake passes share discriminator weights (reuse=True on the second call)
    d_model_real, d_logits_real = discriminator(input_real, alpha=alpha)
    d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, alpha=alpha)
    
    # Discriminator: real images labeled 1, generated images labeled 0
    d_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_model_real)))
    d_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake)))
    # Generator: tries to get its fakes labeled 1
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake)))
    
    d_loss = d_loss_real + d_loss_fake
    
    return d_loss, g_loss

"""
Explanation: Model Loss
Calculating the loss like before, nothing new here. 
End of explanation """ def model_opt(d_loss, g_loss, learning_rate, beta1): """ Get optimization operations :param d_loss: Discriminator loss Tensor :param g_loss: Generator loss Tensor :param learning_rate: Learning Rate Placeholder :param beta1: The exponential decay rate for the 1st moment in the optimizer :return: A tuple of (discriminator training operation, generator training operation) """ # Get weights and bias to update t_vars = tf.trainable_variables() d_vars = [var for var in t_vars if var.name.startswith('discriminator')] g_vars = [var for var in t_vars if var.name.startswith('generator')] # Optimize with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars) g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars) return d_train_opt, g_train_opt """ Explanation: Optimizers Not much new here, but notice how the train operations are wrapped in a with tf.control_dependencies block so the batch normalization layers can update their population statistics. End of explanation """ class GAN: def __init__(self, real_size, z_size, learning_rate, alpha=0.2, beta1=0.5): tf.reset_default_graph() self.input_real, self.input_z = model_inputs(real_size, z_size) self.d_loss, self.g_loss = model_loss(self.input_real, self.input_z, real_size[2], alpha=0.2) self.d_opt, self.g_opt = model_opt(self.d_loss, self.g_loss, learning_rate, 0.5) """ Explanation: Building the model Here we can use the functions we defined about to build the model as a class. This will make it easier to move the network around in our code since the nodes and operations in the graph are packaged in one object. 
End of explanation
"""

def view_samples(epoch, samples, nrows, ncols, figsize=(5,5)):
    """Display one saved batch of generator samples in an nrows x ncols grid.

    epoch: index into `samples` (-1 shows the most recent batch)
    samples: list of sample batches collected during training
    """
    fig, axes = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols, 
                             sharey=True, sharex=True)
    for ax, img in zip(axes.flatten(), samples[epoch]):
        ax.axis('off')
        # Rescale from (-1, 1) back to displayable uint8 pixels
        img = ((img - img.min())*255 / (img.max() - img.min())).astype(np.uint8)
        # NOTE(review): 'box-forced' was removed in newer matplotlib releases
        # (use 'box' there) -- confirm against the pinned matplotlib version.
        ax.set_adjustable('box-forced')
        im = ax.imshow(img, aspect='equal')
   
    plt.subplots_adjust(wspace=0, hspace=0)
    return fig, axes

"""
Explanation: Here is a function for displaying generated images.
End of explanation
"""

def train(net, dataset, epochs, batch_size, print_every=10, show_every=100, figsize=(5,5)):
    """Run the GAN training loop.

    net: GAN instance holding inputs, losses and train ops
    dataset: Dataset instance providing batches()
    print_every: log losses every this many steps
    show_every: sample and display generator output every this many steps
    returns: (losses, samples) collected during training
    """
    saver = tf.train.Saver()
    # NOTE(review): reads the module-level global z_size -- assumes it matches
    # the z_size the net was built with.
    sample_z = np.random.uniform(-1, 1, size=(72, z_size))

    samples, losses = [], []
    steps = 0

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(epochs):
            for x, y in dataset.batches(batch_size):
                steps += 1

                # Sample random noise for G
                batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))

                # Run optimizers
                _ = sess.run(net.d_opt, feed_dict={net.input_real: x, net.input_z: batch_z})
                # input_real is fed here only to satisfy the control
                # dependencies set up in model_opt; G itself never uses it.
                _ = sess.run(net.g_opt, feed_dict={net.input_z: batch_z, net.input_real: x})

                if steps % print_every == 0:
                    # At the end of each epoch, get the losses and print them out
                    train_loss_d = net.d_loss.eval({net.input_z: batch_z, net.input_real: x})
                    train_loss_g = net.g_loss.eval({net.input_z: batch_z})

                    print("Epoch {}/{}...".format(e+1, epochs),
                          "Discriminator Loss: {:.4f}...".format(train_loss_d),
                          "Generator Loss: {:.4f}".format(train_loss_g))
                    # Save losses to view after training
                    losses.append((train_loss_d, train_loss_g))

                if steps % show_every == 0:
                    # training=False so batch norm uses population statistics
                    gen_samples = sess.run(
                                   generator(net.input_z, 3, reuse=True, training=False),
                                   feed_dict={net.input_z: sample_z})
                    samples.append(gen_samples)
                    _ = view_samples(-1, samples, 6, 12, figsize=figsize)
                    plt.show()

        saver.save(sess, './checkpoints/generator.ckpt')

    with open('samples.pkl', 'wb') as f:
        pkl.dump(samples, f)
    
    return losses, samples

"""
Explanation: And another 
function we can use to train our network. Notice when we call generator to create the samples to display, we set training to False. That's so the batch normalization layers will use the population statistics rather than the batch statistics. Also notice that we set the net.input_real placeholder when we run the generator's optimizer. The generator doesn't actually use it, but we'd get an errror without it because of the tf.control_dependencies block we created in model_opt. End of explanation """ real_size = (32,32,3) z_size = 100 learning_rate = 0.0002 batch_size = 128 epochs = 25 alpha = 0.2 beta1 = 0.5 # Create the network net = GAN(real_size, z_size, learning_rate, alpha=alpha, beta1=beta1) dataset = Dataset(trainset, testset) losses, samples = train(net, dataset, epochs, batch_size, figsize=(10,5)) fig, ax = plt.subplots() losses = np.array(losses) plt.plot(losses.T[0], label='Discriminator', alpha=0.5) plt.plot(losses.T[1], label='Generator', alpha=0.5) plt.title("Training Losses") plt.legend() fig, ax = plt.subplots() losses = np.array(losses) plt.plot(losses.T[0], label='Discriminator', alpha=0.5) plt.plot(losses.T[1], label='Generator', alpha=0.5) plt.title("Training Losses") plt.legend() _ = view_samples(-1, samples, 6, 12, figsize=(10,5)) _ = view_samples(-1, samples, 6, 12, figsize=(10,5)) """ Explanation: Hyperparameters GANs are very senstive to hyperparameters. A lot of experimentation goes into finding the best hyperparameters such that the generator and discriminator don't overpower each other. Try out your own hyperparameters or read the DCGAN paper to see what worked for them. End of explanation """
WNoxchi/Kaukasos
pytorch/LSTM GloVe dropout - PyTorch - incomplete.ipynb
mit
import pathlib import os import torchtext # from torchtext.data import Field from torchtext import data # import spacy import pandas as pd import numpy as np # from torchtext.data import TabularDataset """ Explanation: PyTorch LSTM: GloVe + dropout --- Incomplete This is a reimplementation of J.Howard's Improved LSTM baseline: GloVe + dropout Kaggle kernel in FastAI/PyTorch. The original kernel manages a private score of 0.09783 tied with 2747/4551 place. -- Wayne Nixalo Imports End of explanation """ data_path = pathlib.Path('../../data') comp_path = pathlib.Path(data_path/'competitions/jigsaw-toxic-comment-classification-challenge') EMBEDDING_FILE = 'glove/glove.6B.50d.txt' TRAIN_DATA_FILE= 'train.csv' TEST_DATA_FILE = 'test.csv' """ Explanation: Paths End of explanation """ embed_sz = 50 # embedding vector columns (factors) max_feat = 20000 # embedding vector rows (words) maxlen = 100 # words in comment to use """ Explanation: Config parameters End of explanation """ list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"] # train = pd.read_csv(comp_path/TRAIN_DATA_FILE) # test = pd.read_csv(comp/TEST_DATA_FILE) # SEE: Aside 1, Aside 2 # TEXT = Field(sequential=True, tokenize='spacy', lower=True) TEXT = data.Field(sequential=True, tokenize= lambda x: x.split(), lower=True, ) LABEL = data.Field(sequential=False, use_vocab=False) # trainval_datafields = [("id",None),("comment_text",TEXT)] # trainval_datafields.extend((clss, LABEL) for clss in list_classes) # test_datafields = [("id",None), ("comment_text",TEXT)] # train_dataset = data.TabularDataset( # path=comp_path/TRAIN_DATA_FILE, format='csv', # skip_header=True, fields=trainval_datafields, # sort_within_batch=True) # test_dataset = data.TabularDataset( # path=comp_path/TEST_DATA_FILE, format='csv', # skip_header=True, fields=test_datafields) # # TEXT.build_vocab(train_dataset) """ Explanation: Data Loading End of explanation """ from fastai.nlp import * train_df = 
pd.read_csv(comp_path/TRAIN_DATA_FILE) # SEE: Aside 3 model = LanguageModelData.from_dataframes( path=comp_path, field=TEXT, col="comment_text", train_df=train_df, val_df=train_df, test_df=train_df, bs=64, min_freq=3) em_sz = 200 nh = 500 nl = 3 opt_fn = partial(optim.Adam, betas=(0.7, 0.99)) learner = model.get_model(opt_fn, em_sz, nh, nl, dropouti=0.05, dropout=0.05, wdrop=0.1, dropoute=0.02, dropouth=0.05) learner.clip = 0.3 # gradient clipping learner.model.parameters """ Explanation: experimenting with Fastai End of explanation """ list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"] train = pd.read_csv(comp_path/TRAIN_DATA_FILE) # test = pd.read_csv(comp/TEST_DATA_FILE) train[list_classes][55:65] """ Explanation: Misc / Asides / Notes Aside 1 Labels are already binary encoded, so no need to numericalize. Therefore use_vocab=False. End of explanation """
crocha700/UpperOceanSeasonality
notebooks/LLCProcessing.ipynb
cc0-1.0
import datetime import numpy as np import scipy as sp from scipy import interpolate import matplotlib.pyplot as plt %matplotlib inline import cmocean import seawater as sw from netCDF4 import Dataset from llctools import llc_model from pyspec import spectrum as spec c1 = 'slateblue' c2 = 'tomato' c3 = 'k' c4 = 'indigo' plt.rcParams['lines.linewidth'] = 1.5 ap = .75 plt.style.use('seaborn-colorblind') def leg_width(lg,fs): """" Sets the linewidth of each legend object """ for legobj in lg.legendHandles: legobj.set_linewidth(fs) def parse_time(times): """ Converts an array of strings that defines the LLC outputs into datatime arrays, e.g., '20110306T010000' --> datetime.datetime(2011, 3, 6, 1, 0) Input ------ times: array of strings that define LLC model time Output ------ time: array of datetime associated with times """ time = [] for i in range(times.size): yr = times[i][:4] mo = times[i][4:6] day = times[i][6:8] hr = times[i][9:11] time.append(datetime.datetime(int(yr),int(mo),int(day),int(hr))) return np.array(time) grid_path = '../data/llc/2160/grid/' data_path = '../data/llc/2160/uv/' # Kuroshio Extension model class m = llc_model.LLCRegion(grid_dir=grid_path,data_dir=data_path,Nlon=480,Nlat=466,Nz=1) m.load_grid() # model sub-region surface fields files fileu = m.data_dir+'U_480x466x1.20110308T220000' filev = m.data_dir+'V_480x466x1.20110308T220000' fileeta = m.data_dir[:-3]+'Eta/Eta_480x466x1.20110308T220000' time_string = fileu[-15:] time=llc_model.parse_time(time_string) time # important note: U,V are relative to the LLC model grid, # not geostrophical coordinates. Thus, on # faces 4 and 5, U = meridional component # and V = -zonal component (see Dimitris's llc.readme). u, v, eta = m.load_2d_data(filev), -m.load_2d_data(fileu), m.load_2d_data(fileeta) lon,lat = m.lon[m.Nlat//2],m.lat[:,m.Nlon//2] # create a regular Cartesian grid dd = 6. # grid spacing [km] dlon = dd/111.320*np.cos(np.abs(m.lat[m.Nlat//2,m.Nlon//2])*np.pi/180.) 
dlat = dd/110.574 lonimin,lonimax = lon.min()+dlon,lon.max()-dlon latimin,latimax = lat.min()+dlat,lat.max()-dlat loni = np.arange(m.lon.min(),m.lon.max()+dlon,dlon) lati = np.arange(m.lat.min(),m.lat.max()+dlat,dlat) long,latg = np.meshgrid(loni,lati) f0 = sw.f(latg) interpu, interpv, interpeta = sp.interpolate.interp2d(lon,lat,u), sp.interpolate.interp2d(lon,lat,v), sp.interpolate.interp2d(lon,lat,eta) ui, vi,etai = interpu(loni,lati), interpv(loni,lati), interpeta(loni,lati) """ Explanation: This notebook showcases the analysis applied to LLC outputs. Here the calculations are performed for a single snapshot. The full LLC model outputs can be obtained from the ECCO Project. All fields used in this paper take about 700 GB! The analysis leverage on other pieces of code developed by the first author: llctools and pyspec. End of explanation """ def calc_gradu(u,v,dd = 6.): uy,ux = np.gradient(u,dd,dd) vy,vx = np.gradient(v,dd,dd) vort, div, strain = (vx - uy), ux+vy, ( (ux-vy)**2 + (vx+uy)**2 )**.5 return vort, div, strain # double mirror ui and vi def double_mirror(a,forward='True'): if forward: A = np.hstack([a,np.fliplr(a)]) A = np.vstack([A,np.fliplr(A)]) else: iy,ix = a.shape A = a[:iy//2,:ix//2] return A def calc_gradu2(u,v,dd = 6.): u, v = double_mirror(u), double_mirror(v) iy,ix = u.shape Lx, Ly = (ix-1)*dd, (iy-1)*dd dk = 1./Lx dl = 1./Ly l = 2*np.pi*dl*np.append( np.arange(0.,iy//2), np.arange(-iy//2,0.) 
) k = 2*np.pi*dk*np.arange(0.,ix//2+1) k,l = np.meshgrid(k,l) uh, vh = np.fft.rfft2(u), np.fft.rfft2(v) ux, uy = np.fft.irfft2(1j*k*uh), np.fft.irfft2(1j*l*uh) vx, vy = np.fft.irfft2(1j*k*vh), np.fft.irfft2(1j*l*vh) vort, div, strain = (vx - uy), ux+vy, ( (ux-vy)**2 + (vx+uy)**2 )**.5 return vort, div, strain def rms(field): return ((field**2).mean())**.5 vort, div, strain = calc_gradu(ui,vi,dd = 6.e3) vort, div, strain = vort/f0, div/f0, strain/f0 vort2, div2, strain2 = calc_gradu2(ui,vi,dd = 6.e3) vort2,div2, strain2 = double_mirror(vort2,forward=False),double_mirror(div2,forward=False), double_mirror(strain2,forward=False) vort2, div2, strain2 = vort2/f0, div2/f0, strain2/f0 vort.mean()/np.abs(vort).max(), div.mean()/np.abs(div).max(), strain.mean()/np.abs(strain).max() vort2.mean()/np.abs(vort2).max(), div2.mean()/np.abs(div2).max(), strain2.mean()/np.abs(strain2).max() """ Explanation: Vorticity, divergence, and rate of strain End of explanation """ fig = plt.figure(figsize=(14,4)) cv = np.linspace(-1.5,1.5,20) cd = np.linspace(-.5,.5,20) cs = np.linspace(0.,1.5,10) ax = fig.add_subplot(131) plt.contourf(vort,cv,vmin=cv.min(),vmax=cv.max(),cmap=cmocean.cm.balance,extend='both') plt.title('vorticity, rms = %f' % rms(vort)) #plt.colorbar() plt.xticks([]); plt.yticks([]) ax = fig.add_subplot(132) plt.contourf(vort2,cv,vmin=cv.min(),vmax=cv.max(),cmap=cmocean.cm.balance,extend='both') plt.title('vorticity, rms = %f' % rms(vort2)) #plt.colorbar() plt.xticks([]); plt.yticks([]) fig = plt.figure(figsize=(14,4)) ax = fig.add_subplot(131) plt.contourf(div,cd,vmin=cd.min(),vmax=cd.max(),cmap=cmocean.cm.balance,extend='both') plt.title('divergence, rms = %f' % rms(div)) #plt.colorbar() plt.xticks([]); plt.yticks([]) ax = fig.add_subplot(132) plt.contourf(div2,cd,vmin=cd.min(),vmax=cd.max(),cmap=cmocean.cm.balance,extend='both') plt.title('divergence, rms = %f' % rms(div2)) #plt.colorbar() plt.xticks([]); plt.yticks([]) fig = plt.figure(figsize=(14,4)) ax = 
fig.add_subplot(131) plt.contourf(strain,cs,vmin=cs.min(),vmax=cs.max(),cmap=cmocean.cm.amp,extend='both') plt.title('divergence, rms = %f' % rms(strain)) #plt.colorbar() plt.xticks([]); plt.yticks([]) ax = fig.add_subplot(132) plt.contourf(strain2,cs,vmin=cs.min(),vmax=cs.max(),cmap=cmocean.cm.amp,extend='both') plt.title('strain, rms = %f' % rms(strain2)) #plt.colorbar() plt.xticks([]); plt.yticks([]) stats_4320 = np.load(__depends__[1]) stats_2160 = np.load(__depends__[2]) llc = Dataset(__depends__[0]) time2160 = parse_time(llc['2160']['hourly']['time'][:]) timed2160 = time2160[::24] time4320 = parse_time(llc['4320']['hourly']['time'][:]) timed4320 = time4320[::24] """ Explanation: Discretization error End of explanation """ cv = np.linspace(-1.5,1.5,20) cd = np.linspace(-.5,.5,20) cs = np.linspace(0.,1.5,10) fig = plt.figure(figsize=(19,4)) ax = fig.add_subplot(131) plt.contourf(vort,cv,vmin=cv.min(),vmax=cv.max(),cmap='RdBu_r',extend='both') plt.title('vorticity, rms = %f' % rms(vort)) plt.colorbar() plt.xticks([]); plt.yticks([]) ax = fig.add_subplot(132) plt.title('divergence, rms = %f' % rms(div)) plt.contourf(div,cd,vmin=cd.min(),vmax=cd.max(),cmap='RdBu_r',extend='both') plt.colorbar() plt.xticks([]); plt.yticks([]) ax = fig.add_subplot(133) plt.title('strain rate, rms %f' % rms(strain)) plt.contourf(strain,cs,vmax=cs.max(),cmap='viridis',extend='max') plt.colorbar() plt.xticks([]); plt.yticks([]) """ Explanation: Quick-and-dirty, sanity-check plots End of explanation """ specU = spec.TWODimensional_spec(ui.copy(),d1=dd,d2=dd) specV = spec.TWODimensional_spec(vi.copy(),d1=dd,d2=dd) specEta = spec.TWODimensional_spec(etai.copy(),d1=dd,d2=dd) iEu,iEv, iEeta = specU.ispec,specV.ispec, specEta.ispec iE = 0.5*(iEu+iEv) kr = np.array([1.e-4,1.]) e2 = kr**-2/1.e4 e3 = kr**-3/1.e7 e5 = kr**-5/1.e9 fig = plt.figure(figsize=(12,4)) ax = fig.add_subplot(121) plt.loglog(specU.ki,iE) plt.loglog(kr,12.*e2,'.5',linewidth=2); plt.text(1/17.5,5.e-1,'-2',fontsize=14) 
plt.loglog(kr,35*e3,'.5',linewidth=2); plt.text(1/30.,2.e-2,'-3',fontsize=14) plt.xlim(1.e-3,1.e-1) plt.ylim(1.e-2,1.e2) plt.xlabel('Wavenumber [cpkm]') plt.ylabel(r'KE density [m$^2$ s$^{-2}$/cpkm]') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.45, hspace=None) ax = fig.add_subplot(122) plt.loglog(specEta.ki,iEeta) plt.loglog(kr,e2/.5e1,'.5',linewidth=2); plt.text(1/17.5,1.e-2,'-2',fontsize=14) plt.loglog(kr,3*e5/1.5e2,'.5',linewidth=2); plt.text(1/25.5,1.e-5,'-5',fontsize=14) plt.xlim(1.e-3,1.e-1) plt.ylim(1.e-6,1.e2) plt.ylabel(r'SSH variance density [m$^2$/cpkm]') plt.xlabel('Wavenumber [cpkm]') """ Explanation: Spectra End of explanation """
lilleswing/deepchem
examples/tutorials/15_Training_a_Generative_Adversarial_Network_on_MNIST.ipynb
mit
!curl -Lo conda_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py import conda_installer conda_installer.install() !/root/miniconda/bin/conda info -e !pip install --pre deepchem import deepchem deepchem.__version__ """ Explanation: Tutorial Part 15: Training a Generative Adversarial Network on MNIST In this tutorial, we will train a Generative Adversarial Network (GAN) on the MNIST dataset. This is a large collection of 28x28 pixel images of handwritten digits. We will try to train a network to produce new images of handwritten digits. Colab This tutorial and the rest in this sequence are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link. Setup To run DeepChem within Colab, you'll need to run the following cell of installation commands. This will take about 5 minutes to run to completion and install your environment. End of explanation """ import deepchem as dc import tensorflow as tf from deepchem.models.optimizers import ExponentialDecay from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Dense, Reshape import matplotlib.pyplot as plot import matplotlib.gridspec as gridspec %matplotlib inline mnist = tf.keras.datasets.mnist.load_data(path='mnist.npz') images = mnist[0][0].reshape((-1, 28, 28, 1))/255 dataset = dc.data.NumpyDataset(images) """ Explanation: To begin, let's import all the libraries we'll need and load the dataset (which comes bundled with Tensorflow). End of explanation """ def plot_digits(im): plot.figure(figsize=(3, 3)) grid = gridspec.GridSpec(4, 4, wspace=0.05, hspace=0.05) for i, g in enumerate(grid): ax = plot.subplot(g) ax.set_xticks([]) ax.set_yticks([]) ax.imshow(im[i,:,:,0], cmap='gray') plot_digits(images) """ Explanation: Let's view some of the images to get an idea of what they look like. 
End of explanation """ class DigitGAN(dc.models.WGAN): def get_noise_input_shape(self): return (10,) def get_data_input_shapes(self): return [(28, 28, 1)] def create_generator(self): return tf.keras.Sequential([ Dense(7*7*8, activation=tf.nn.relu), Reshape((7, 7, 8)), Conv2DTranspose(filters=16, kernel_size=5, strides=2, activation=tf.nn.relu, padding='same'), Conv2DTranspose(filters=1, kernel_size=5, strides=2, activation=tf.sigmoid, padding='same') ]) def create_discriminator(self): return tf.keras.Sequential([ Conv2D(filters=32, kernel_size=5, strides=2, activation=tf.nn.leaky_relu, padding='same'), Conv2D(filters=64, kernel_size=5, strides=2, activation=tf.nn.leaky_relu, padding='same'), Dense(1, activation=tf.math.softplus) ]) gan = DigitGAN(learning_rate=ExponentialDecay(0.001, 0.9, 5000)) """ Explanation: Now we can create our GAN. Like in the last tutorial, it consists of two parts: The generator takes random noise as its input and produces output that will hopefully resemble the training data. The discriminator takes a set of samples as input (possibly training data, possibly created by the generator), and tries to determine which are which. This time we will use a different style of GAN called a Wasserstein GAN (or WGAN for short). In many cases, they are found to produce better results than conventional GANs. The main difference between the two is in the discriminator (often called a "critic" in this context). Instead of outputting the probability of a sample being real training data, it tries to learn how to measure the distance between the training distribution and generated distribution. That measure can then be directly used as a loss function for training the generator. We use a very simple model. The generator uses a dense layer to transform the input noise into a 7x7 image with eight channels. That is followed by two convolutional layers that upsample it first to 14x14, and finally to 28x28. 
The discriminator does roughly the same thing in reverse. Two convolutional layers downsample the image first to 14x14, then to 7x7. A final dense layer produces a single number as output. In the last tutorial we used a sigmoid activation to produce a number between 0 and 1 that could be interpreted as a probability. Since this is a WGAN, we instead use a softplus activation. It produces an unbounded positive number that can be interpreted as a distance. End of explanation """ def iterbatches(epochs): for i in range(epochs): for batch in dataset.iterbatches(batch_size=gan.batch_size): yield {gan.data_inputs[0]: batch[0]} gan.fit_gan(iterbatches(100), generator_steps=0.2, checkpoint_interval=5000) """ Explanation: Now to train it. As in the last tutorial, we write a generator to produce data. This time the data is coming from a dataset, which we loop over 100 times. One other difference is worth noting. When training a conventional GAN, it is important to keep the generator and discriminator in balance thoughout training. If either one gets too far ahead, it becomes very difficult for the other one to learn. WGANs do not have this problem. In fact, the better the discriminator gets, the cleaner a signal it provides and the easier it becomes for the generator to learn. We therefore specify generator_steps=0.2 so that it will only take one step of training the generator for every five steps of training the discriminator. This tends to produce faster training and better results. End of explanation """ plot_digits(gan.predict_gan_generator(batch_size=16)) """ Explanation: Let's generate some data and see how the results look. End of explanation """
dereneaton/ipyrad
testdocs/analysis/cookbook-digest_genomes.ipynb
gpl-3.0
# conda install ipyrad -c conda-forge -c bioconda import ipyrad.analysis as ipa """ Explanation: <span style="color:gray">ipyrad-analysis toolkit: </span> digest genomes The purpose of this tool is to digest a genome file in silico using the same restriction enzymes that were used for an empirical data set to attempt to extract homologous data from the genome file. This can be a useful procedure for adding additional outgroup samples to a data set. Required software End of explanation """ genome = "/home/deren/Downloads/Ahypochondriacus_459_v2.0.fa" """ Explanation: A genome file You will need a genome file in fasta format (optionally it can be gzip compressed). End of explanation """ digest = ipa.digest_genome( fasta=genome, name="amaranthus-digest", workdir="digested_genomes", re1="CTGCAG", re2="AATTC", ncopies=10, readlen=150, min_size=300, max_size=500, nscaffolds=12, ) digest.run() """ Explanation: Initialize the tool (e.g., ddRAD) You can generate single or paired-end data, and you will likely want to restrict the size of selected fragments to be within an expected size selection window, as is typically done in empirical data sets. Here I select all fragments occuring between two restriction enzymes where the intervening fragment is 300-500bp in length. I then ask that the analysis returns the digested fragments as 150bp fastq reads, and to provide 10 copies of each one. I also restrict it to only the first (largest) 12 scaffolds using the 'nscaffolds' arg. End of explanation """ ! 
ls -l digested_genomes/ """ Explanation: Check results End of explanation """ digest = ipa.digest_genome( fasta=genome, name="amaranthus-digest-RAD", workdir="digested_genomes", re1="CTGCAG", re2=None, paired=False, ncopies=10, readlen=100, min_size=300, max_size=500, nscaffolds=12, ) digest.run() """ Explanation: Example 2 (original RAD data) The original RAD method uses sonication rather than a second restriction digestion to cut all of the fragments down to an appropriate size for sequencing. Thus you only need to provide a single cut site and a selection window. End of explanation """
skorokithakis/pythess-files
014 - Lorde/tao_mro/tao_of_python.ipynb
mit
two = 2 print(type(two)) print(type(type(two))) print(type(two).__bases__) print(dir(two)) """ Explanation: The Tao of Python The intricate relationship between "object" and "type" and how metaclasses, classes and instances are related <img src="figures/yin_yang.png" style="display:block;margin:auto;width:60%;"/> About me Diploma Biology MSc Nanosciences and Nanotechnologies (VB .NET) PhD in Computer Simulations in Complex Networks (C#) Postdoc Researcher in Systems Biology (Heidelberg, Germany): "Constraint based modelling in biological networks" (Java) Postdoc Researcher in Bioinformatics (London): "Computer simulations and mathematical programming in biological networks" (Python) Now: Full stack web development with Django/DRF/Polymer/Web Components (Python, ES6, HTML, CSS) Contents of this talk Object-oriented relationships Relationship rules What is a Python object? Classes as objects Metaclasses What is type? What is object? How are type and object related? The Python objects map Most of this talk is based on this article by Shalabh Chaturvedi. Why is this talk useful? Actually it's not terribly useful Deep understanding of the Python object model Clarification of the role and behavior of classes, metaclasses and instances Appreciation of the language on different level Zen-like satisfying moment of understanding Bragging rights :D Object-oriented relationships While we introduce many different objects, we only use two kinds of relationships: 1. is a kind of (solid line): Also known as specialization or inheritance, this relationship exists between two objects when one (the subclass) is a specialized version of the other (the superclass). A snake is a kind of reptile. It has all the traits of a reptile and some specific traits which identify a snake. Terms used: subclass of, superclass of, superclass-subclass or simply is a. 2. 
is an instance of (dashed line): Also known as instantiation, this relationship exists between two objects when one (the instance) is a concrete example of what the other specifies (the type). I have a pet snake named Squasher. Squasher is an instance of a snake. Terms used: instance of, type of. <img src="figures/oo_relationships.png" style="display:block;margin:auto;width:90%;"/> Relationship rules If A is a subclass of B, and B is a subclass of C, then A is a subclass of C. If X is an instance of A, and A is a subclass of B, then X is an instance of B. If B is an instance of M, and A is a subclass of B, then A is an instance of M. <img src="figures/rules_123.png" style="display:block;margin:auto;width:90%;"/> What is a Python object? An object is an entity with the following characteristic properties: 1. Identity (i.e. given two names we can say for sure if they refer to one and the same object, or not). 2. A value - which may include a bunch of attributes (i.e. we can reach other objects through objectname.attributename). 3. A type - every object has exactly one type. For instance, the object 2 has a type int and the object "joe" has a type string. 4. One or more bases. A base is similar to a super-class or base-class in object-oriented lingo. End of explanation """ class A: pass a = A() print(type(a)) print(type(A)) print(A.__bases__) A = type('A', (), {}) a = A() print(type(a)) print(type(A)) print(A.__bases__) print(isinstance(a, A), isinstance(a, object), issubclass(A, object)) def f(): """My name is f.""" pass print(type(f)) print(type(type(f))) print(type(f).__bases__) print(f.__doc__) """ Explanation: Rule 1: Everything is an object And I mean everything. Even things that are "primitive types" in other languages. 
* You can store them in variables * You can pass them as parameters to functions * You can return them as the result of functions * You can construct them at runtime And more importantly: You can treat every programming construct in a uniform and consistent way Functions as objects When you use the keyword def, Python creates a function object. Functions can be passed around as arguments to other functions. These functions that take other functions as arguments are called higher order functions. e.g. the map function takes a function and an iterable and applies the function to each item in the iterable. Classes as objects When you use the keyword class, Python executes it and creates an object. This object (the class) is itself capable of creating objects (the instances), and this is why it's a class. Since classes are objects, they must be generated by something, this is metaclasses. Since metaclasses objects, they must also be generated by something, this is again metaclasses. Therefore: Objects are instances of classes, classes are instances of metaclasses and metaclasses are instances of themselves. Metaclasses (these are objects too!) Metaclasses are the "stuff" that creates classes. You define classes in order to create objects, right? We learned that Python classes are objects. Well, metaclasses are what creates these objects. They are the classes' classes, you can picture them this way: MyClass = MetaClass() MyObject = MyClass() Metaclasses are deeper magic than 99% of users should ever worry about. If you wonder whether you need them, you don’t (the people who actually need them know with certainty that they need them, and don’t need an explanation about why). Tim Peters What is type? Remember the function type? The good old function that lets you know what type an object is. type can also create classes on the fly. type can take the description of a class as parameters, and return a class as type(name, bases, dct). 
name is a string giving the name of the class to be constructed. bases is a tuple giving the parent classes of the class to be constructed. dct is a dictionary of the attributes and methods of the class to be constructed. Why the heck is it written in lowercase, and not Type? Consistency with str, the class that creates strings objects, and int the class that creates integer objects. type is just the class that creates class objects. End of explanation """ issubclass(type, object) # Recap rule #1 issubclass(object, object) # Recap rule #1 issubclass(object, type) # Recap rule #1 isinstance(object, type) # Recap rule #2 isinstance(type, type) # Recap rule #2 isinstance(type, object) # Recap rule #3 isinstance(object, object) # Recap rule #3 """ Explanation: The power of type Everything is an object in Python, and they are all either instances of classes or instances of metaclasses. type is the metaclass Python uses to create (i.e. instantiate) all classes and metaclasses, including type itself. type is actually its own metaclass. This is not something you could reproduce in pure Python, and is done by cheating a little bit at the implementation level. <img src="figures/type_thug.jpg" style="display:block;margin:auto;width:40%;"/> What is object? object is the class that all classes inherit from. All classes including object are subclasses of themselves. All classes including object are subclasses of object. object.__bases__ is an empty tuple. All classes except object will have object in __bases__ in a class in their inheritance hierarchy. Kinds of objects There are two kinds of objects in Python: Type objects - can create instances, can be subclassed. e.g. type, object, int, str, list. Non-type objects - cannot create instances, cannot be subclassed. e.g. 1, "hello", [1, 2, 3]. type and object are two primitive objects of the system. objectname.__class__ exists for every object and points the type of the object. 
objectname.__bases__ exists for every type object and points the superclasses of the object. It is empty only for object. <img src="figures/women_objects.jpg" style="display:block;margin:auto;width:50%;"/> Recap All classes and metaclasses including object are subclasses of object. All classes and metaclasses including type are instances of type. All objects including object are instances of object. End of explanation """ from IPython.display import Image, display display(Image(url='figures/mind_blown.gif', width=400)) """ Explanation: <img src="figures/dafuq.jpg" style="display:block;margin:auto;width:50%;"/> <img src="figures/rules_objtype1.png" style="display:block;margin:auto;width:60%;"/> <img src="figures/rules_objtype2.png" style="display:block;margin:auto;width:60%;"/> <img src="figures/rules_objtype3.png" style="display:block;margin:auto;width:60%;"/> <img src="figures/rules_objtype4.png" style="display:block;margin:auto;width:60%;"/> <img src="figures/rules_objtype_final.png" style="display:block;margin:auto;width:80%;"/> End of explanation """
UltronAI/Deep-Learning
CS231n/assignment1/features.ipynb
mit
import random import numpy as np from cs231n.data_utils import load_CIFAR10 import matplotlib.pyplot as plt from __future__ import print_function %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading extenrnal modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 """ Explanation: Image features exercise Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the assignments page on the course website. We have seen that we can achieve reasonable performance on an image classification task by training a linear classifier on the pixels of the input image. In this exercise we will show that we can improve our classification performance by training linear classifiers not on raw pixels but on features that are computed from the raw pixels. All of your work for this exercise will be done in this notebook. End of explanation """ from cs231n.features import color_histogram_hsv, hog_feature def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000): # Load the raw CIFAR-10 data cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # Subsample the data mask = list(range(num_training, num_training + num_validation)) X_val = X_train[mask] y_val = y_train[mask] mask = list(range(num_training)) X_train = X_train[mask] y_train = y_train[mask] mask = list(range(num_test)) X_test = X_test[mask] y_test = y_test[mask] return X_train, y_train, X_val, y_val, X_test, y_test X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data() """ Explanation: Load data Similar to previous exercises, we will load CIFAR-10 data from disk. 
End of explanation """ from cs231n.features import * num_color_bins = 10 # Number of bins in the color histogram feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)] X_train_feats = extract_features(X_train, feature_fns, verbose=True) X_val_feats = extract_features(X_val, feature_fns) X_test_feats = extract_features(X_test, feature_fns) # Preprocessing: Subtract the mean feature mean_feat = np.mean(X_train_feats, axis=0, keepdims=True) X_train_feats -= mean_feat X_val_feats -= mean_feat X_test_feats -= mean_feat # Preprocessing: Divide by standard deviation. This ensures that each feature # has roughly the same scale. std_feat = np.std(X_train_feats, axis=0, keepdims=True) X_train_feats /= std_feat X_val_feats /= std_feat X_test_feats /= std_feat # Preprocessing: Add a bias dimension X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))]) X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))]) X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))]) """ Explanation: Extract Features For each image we will compute a Histogram of Oriented Gradients (HOG) as well as a color histogram using the hue channel in HSV color space. We form our final feature vector for each image by concatenating the HOG and color histogram feature vectors. Roughly speaking, HOG should capture the texture of the image while ignoring color information, and the color histogram represents the color of the input image while ignoring texture. As a result, we expect that using both together ought to work better than using either alone. Verifying this assumption would be a good thing to try for the bonus section. The hog_feature and color_histogram_hsv functions both operate on a single image and return a feature vector for that image. 
The extract_features function takes a set of images and a list of feature functions and evaluates each feature function on each image, storing the results in a matrix where each column is the concatenation of all feature vectors for a single image. End of explanation """ # Use the validation set to tune the learning rate and regularization strength from cs231n.classifiers.linear_classifier import LinearSVM learning_rates = [1e-9, 1e-8, 1e-7] regularization_strengths = [5e4, 5e5, 5e6] results = {} best_val = -1 best_svm = None ################################################################################ # TODO: # # Use the validation set to set the learning rate and regularization strength. # # This should be identical to the validation that you did for the SVM; save # # the best trained classifer in best_svm. You might also want to play # # with different numbers of bins in the color histogram. If you are careful # # you should be able to get accuracy of near 0.44 on the validation set. # ################################################################################ pass ################################################################################ # END OF YOUR CODE # ################################################################################ # Print out results. for lr, reg in sorted(results): train_accuracy, val_accuracy = results[(lr, reg)] print('lr %e reg %e train accuracy: %f val accuracy: %f' % ( lr, reg, train_accuracy, val_accuracy)) print('best validation accuracy achieved during cross-validation: %f' % best_val) # Evaluate your trained SVM on the test set y_test_pred = best_svm.predict(X_test_feats) test_accuracy = np.mean(y_test == y_test_pred) print(test_accuracy) # An important way to gain intuition about how an algorithm works is to # visualize the mistakes that it makes. In this visualization, we show examples # of images that are misclassified by our current system. 
The first column # shows images that our system labeled as "plane" but whose true label is # something other than "plane". examples_per_class = 8 classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] for cls, cls_name in enumerate(classes): idxs = np.where((y_test != cls) & (y_test_pred == cls))[0] idxs = np.random.choice(idxs, examples_per_class, replace=False) for i, idx in enumerate(idxs): plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1) plt.imshow(X_test[idx].astype('uint8')) plt.axis('off') if i == 0: plt.title(cls_name) plt.show() """ Explanation: Train SVM on features Using the multiclass SVM code developed earlier in the assignment, train SVMs on top of the features extracted above; this should achieve better results than training SVMs directly on top of raw pixels. End of explanation """ print(X_train_feats.shape) from cs231n.classifiers.neural_net import TwoLayerNet input_dim = X_train_feats.shape[1] hidden_dim = 500 num_classes = 10 net = TwoLayerNet(input_dim, hidden_dim, num_classes) best_net = None ################################################################################ # TODO: Train a two-layer neural network on image features. You may want to # # cross-validate various parameters as in previous sections. Store your best # # model in the best_net variable. # ################################################################################ pass ################################################################################ # END OF YOUR CODE # ################################################################################ # Run your neural net classifier on the test set. You should be able to # get more than 55% accuracy. test_acc = (net.predict(X_test_feats) == y_test).mean() print(test_acc) """ Explanation: Inline question 1: Describe the misclassification results that you see. Do they make sense? 
Neural Network on image features Earlier in this assigment we saw that training a two-layer neural network on raw pixels achieved better classification performance than linear classifiers on raw pixels. In this notebook we have seen that linear classifiers on image features outperform linear classifiers on raw pixels. For completeness, we should also try training a neural network on image features. This approach should outperform all previous approaches: you should easily be able to achieve over 55% classification accuracy on the test set; our best model achieves about 60% classification accuracy. End of explanation """
kanhua/pypvcell
demos/dealing_with_spectrum_data.ipynb
apache-2.0
%matplotlib inline import numpy as np import scipy.constants as sc import matplotlib.pyplot as plt from pypvcell.spectrum import Spectrum from pypvcell.illumination import Illumination from pypvcell.photocurrent import gen_step_qe_array """ Explanation: Dealing with spectrum data This tutorial demonstrates how to use Spectrum class to do various arithmetic operations of Spectrum. This demo uses the Jsc calculation as an example, namely \begin{equation} J_{sc}=\int \phi(E)QE(E) dE \end{equation} where $\phi$ is the illumination spectrum in photon flux, $E$ is the photon energy and $QE$ is the quantum efficiency. End of explanation """ qe=gen_step_qe_array(1.42,0.9) plt.plot(qe[:,0],qe[:,1]) plt.xlabel('photon energy (eV)') plt.ylabel('QE') """ Explanation: Quantum efficiency We first use a function gen_step_qe_array to generate a quantum efficiency spectrum. This spectrum is a step function with a cut-off at the band gap of 1.42 eV. End of explanation """ qe_sp=Spectrum(x_data=qe[:,0],y_data=qe[:,1],x_unit='eV') """ Explanation: qe is a numpy array. The recommeneded way to handle it is converting it to Spectrum class: End of explanation """ qe=qe_sp.get_spectrum(to_x_unit='nm') plt.plot(qe[0,:],qe[1,:]) plt.xlabel('wavelength (nm)') plt.ylabel('QE') plt.xlim([300,1100]) """ Explanation: Unit conversion When we want to retrieve the value of qe_sp we have to specicify the unit of the wavelength. 
For example, say, converting the wavelength to nanometer: End of explanation """ # Calulate the portion of "non-absorbed" photons, assuming QE is equivalent to absorptivity tr_sp=1-qe_sp tr=tr_sp.get_spectrum(to_x_unit='nm') plt.plot(tr[0,:],tr[1,:]) plt.xlabel('wavelength (nm)') plt.ylabel('QE') plt.xlim([300,1100]) """ Explanation: Arithmetic operation We can do arithmetic operation directly with Spectrum class such as End of explanation """ std_ill=Illumination("AM1.5g") """ Explanation: Illumination spectrum pypvcell has a class Illumination that is inherited from Spectrum to handle the illumination. It inherits all the capability of Spectrum but has several methods specifically for sun illumination. Some default standard spectrum is embedded in the pypvcell: End of explanation """ ill=std_ill.get_spectrum('nm') plt.plot(*ill) plt.xlabel("wavelength (nm)") plt.ylabel("intensity (W/m^2-nm)") fig, ax1= plt.subplots() ax1.plot(*ill) ax2 = ax1.twinx() ax2.plot(*qe) ax1.set_xlim([400,1600]) ax2.set_ylabel('sin', color='r') ax2.tick_params('y', colors='r') ill[:,-1] qe[:,-1] """ Explanation: Show the values of the data End of explanation """ std_ill.total_power() """ Explanation: Calcuate the total intensity in W/m^2 End of explanation """ ill=std_ill.get_spectrum('eV') plt.plot(*ill) plt.xlabel("wavelength (eV)") plt.ylabel("intensity (W/m^2-eV)") """ Explanation: Unit conversion of illumination spectrum It requires a bit of attention of converting spectrum that is in the form of $\phi(E)dE$, i.e., the value of integration is a meaningful quantitfy such as total power. This has been also handled by Illumination class. In the following case, we convert the wavelength to eV. Please note that the units of intensity is also changed to W/m^2-eV. End of explanation """ # calculate \phi(E)QE(E) dE. 
# Spectrum class automatically convert the units and align the x-data by interpolating std_ill jsc_e=std_ill*qe_sp """ Explanation: Spectrum multiplication To calcualte the overall photocurrent, we have to calculate $\phi(E)QE(E) dE$ first. This would involves some unit conversion and interpolation between two spectrum. However, this is easily dealt by Spectrum class: End of explanation """ jsc_e_a=jsc_e.get_spectrum('nm',to_photon_flux=True) plt.plot(*jsc_e_a) plt.xlim([300,1100]) """ Explanation: Here's a more delicate point. We should convert the unit to photon flux in order to calculate Jsc. End of explanation """ sc.e*np.trapz(y=jsc_e_a[1,:],x=jsc_e_a[0,:]) """ Explanation: Integrate it yields the total photocurrent density in A/m^2 End of explanation """ from pypvcell.photocurrent import calc_jsc calc_jsc(std_ill,qe_sp) """ Explanation: In fact, pypvcell already provides a function calc_jsc() for calculating Jsc from given spectrum and QE: End of explanation """
csherwood-usgs/landlab
landlab/components/depth_dependent_cubic_soil_creep/tests/solution_for_4x7_grid_steady_state.ipynb
mit
D = 0.01 Sc = 0.8 Hstar = 0.5 E = 0.0001 P0 = 0.0002 """ Explanation: This notebook works out the expected hillslope sediment flux, topography, and soil thickness for steady state on a 4x7 grid. This provides "ground truth" values for tests. Let the hillslope erosion rate be $E$, the flux coefficient $D$, critical gradient $S_c$, and slope gradient $S$. The regolith thickness is $H$, with bare-bedrock production rate $P_0$ and depth-decay $H_*$. Finally, we set the transport decay scale the same as the production depth-decay scale. Then we have the hillslope flux as a function of distance from ridgetop, $x$, as $q_s = E x = \left( DS + \frac{D}{S_c^2} S^3 \right) \left(1 - e^{ -H/H_*} \right)$ Parameter values: let $D = 0.01 m^2 y^{-1}$, $S_c = 0.8$, $H_* = 0.5 m$, $P_0 = 0.0002$, and $E = 0.0001 m y^{-1}$: End of explanation """ import math H = -Hstar * math.log(E / P0) H """ Explanation: With that, calculate the expected equilibrium $H$: $E = P_0 e^{-H/H_*}$ $H = -H_* \ln (E/P_0)$ Plugging in the numbers: End of explanation """ P0 * math.exp(-H / Hstar) """ Explanation: Double check: if we plug this $H$ back in, do we recover $E$? End of explanation """ qs = 25 * E qs """ Explanation: Yes, good. Now, our geometry consists of a hillslope discretized into seven nodes. The two on either end are zero-elevation fixed boundaries, so we have to find the elevations of the five interior ones. But the hillslope should be symmetrical, so we really only have to find 1, 2, and 3 as in 0 --- 1 --- 2 --- 3 --- etc. where node 3 is the top of the hill. The slope between nodes 1 and 0 must be positive (uphill to right). It must be just steep enough to carry all the sediment from its own cell plus the sediment from node 2's cell, plus half the sediment from node 3's cell. We'll assume all cells have width $dx = 10 m$. Therefore, we have to transport sediment produced in strip 25 m x 1 m, or 25 m2. 
Our expected flux is then: End of explanation """ f = 1.0 - math.exp(-H / Hstar) f """ Explanation: In fact, for each interface between cells, the slope at that interface is given by the following polynomial: $f\frac{D}{S_c^2} S^3 + 0 S^2 + fDS - qs = 0$ Here the $f$ is shorthand for $1 - \exp (-H/H_*)$. I've included the zero in front of the $S^2$ term just to make it explicit. So, for the slope between nodes 0 and 1, we need first to define our polynomial coefficients, $p$. Then we'll invoke numpy's roots function to solve for $S$. To be consistent with roots usage, we'll call the coefficient of the highest (cubic) term $p_0$, the next highest (square) $p_1$, etc. So: $p_0 S^3 + p_1 S^2 + p_2 S + p_3 = 0$ Clearly, we'll need $f$, so let's calculate that first: End of explanation """ import numpy as np p = np.zeros(4) p[0] = (f * D) / (Sc ** 2) p[1] = 0.0 p[2] = f * D p[3] = -qs p """ Explanation: Now, let's calculate the coefficients: $p_0 = f D / S_c^2$ $p_1 = 0$ $p_2 = f D$ $p_3 = -q_s$ Clearly, only $p_3$ will vary from node to node. Here are the numbers: End of explanation """ my_roots = np.roots(p) my_roots """ Explanation: Now let's find the roots of this cubic polynomial: End of explanation """ Spred = 0.4 qspred = (D * Spred + (D / (Sc * Sc)) * (Spred ** 3)) * (1.0 - np.exp(-H / Hstar)) qspred """ Explanation: There's just one real root here: $S \approx 1.33$. Let's plug that back in and see if we recapture the correct $qs$: End of explanation """ p[3] = -0.0015 my_roots = np.roots(p) my_roots """ Explanation: Great! That's extremely close. Let's try with the slope between nodes 1 and 2. 
The only difference here is that the flux $qs$ now derives from just $15 m^2$, so $qs = 0.0015: End of explanation """ Spred = 0.269437 qspred = (D * Spred + (D / (Sc * Sc)) * (Spred ** 3)) * (1.0 - np.exp(-H / Hstar)) qspred """ Explanation: Once again, let's test: End of explanation """ p[3] = -0.0005 my_roots = np.roots(p) my_roots """ Explanation: Finally, the slope between 2 and 3, which needs to carry half a cell's worth of sediment, or $qs = 0.0005$: End of explanation """ Spred = 0.0985 qspred = (D * Spred + (D / (Sc * Sc)) * (Spred ** 3)) * (1.0 - np.exp(-H / Hstar)) qspred """ Explanation: And check this: End of explanation """ elev = np.zeros(7) elev[1] = 0.4 * 10.0 elev[5] = elev[1] elev[2] = elev[1] + 0.269437 * 10.0 elev[4] = elev[2] elev[3] = elev[2] + 0.0985 * 10.0 elev """ Explanation: Fabulous. Now to find the predicted elevations: just add up slope x distance for each node, going inward from the boundaries: End of explanation """ S = 0.4 Deff = D * ((S / Sc) ** 2) Deff """ Explanation: So, at equilibrium, our model should create a symmetrical hill with a peak elevation a little under 8 m and a soil thickness of 0.347 m. What time step size would be reasonable? Start by defining an "effective D" parameter, which is the linearized coefficient in front of the cubic term: $D_{eff} = D (S / S_c)^2$ Then take the steepest steady state slope: End of explanation """ 10.0*10.0/(2.0*Deff) """ Explanation: Now, maximum time step size should be $\Delta x^2 / 2 D_{eff}$: End of explanation """ Hstar / P0 """ Explanation: There's also a constraint for the weathering piece. The characteristic time scale is $T = H_* / P_0$, which in this case is: End of explanation """ 80.0 / E """ Explanation: So, this calculation suggests that weathering is the limiting factor on time-step size. We might choose 250 years for a reasonably smooth solution. 
The time it would take for baselevel fall to bring the crest of the hill up to its ten times its equilibrium elevation of 8 m: End of explanation """ 8.0e5/250. """ Explanation: So let's say we run for 800,000 years at 250 year time steps: End of explanation """
samuxiii/notebooks
simpsons/Simpsons-PyTorch.ipynb
apache-2.0
import os, random from scipy.misc import imread, imresize width = 0 lenght = 0 num_test_images = len(test_image_names) for i in range(num_test_images): path_file = os.path.join(test_root_path, test_image_names[i]) image = imread(path_file) width += image.shape[0] lenght += image.shape[1] width_mean = width//num_test_images lenght_mean = lenght//num_test_images dim_size = (width_mean + lenght_mean) // 2 print("Width mean: {}".format(width_mean)) print("Lenght mean: {}".format(lenght_mean)) print("Size mean dimension: {}".format(dim_size)) """ Explanation: Calculate mean width and lenght from test images End of explanation """ import matplotlib.pyplot as plt idx = random.randint(0, num_test_images) sample_file, sample_name = test_image_names[idx], test_image_names[idx].split('_')[:-1] path_file = os.path.join(test_root_path, sample_file) sample_image = imread(path_file) print("Label:{}, Image:{}, Shape:{}".format('_'.join(sample_name), idx, sample_image.shape)) plt.figure(figsize=(3,3)) plt.imshow(sample_image) plt.axis('off') plt.show() """ Explanation: Size mean dimension will be used for the resizing process. All the images will be scaled to (149, 149) since it's the average of the test images. 
Show some test examples End of explanation """ def get_num_of_samples(): count = 0 for _,character in enumerate(character_directories): path = os.path.join(train_root_path, character) count += len(listdir(path)) return count def get_batch(batch_init, batch_size): data = {'image':[], 'label':[]} character_batch_size = batch_size//len(character_directories) character_batch_init = batch_init//len(character_directories) character_batch_end = character_batch_init + character_batch_size for _,character in enumerate(character_directories): path = os.path.join(train_root_path, character) images_list = listdir(path) for i in range(character_batch_init, character_batch_end): if len(images_list) == 0: continue #if this character has small number of features #we repeat them if i >= len(images_list): p = i % len(images_list) else: p = i path_file = os.path.join(path, images_list[p]) image = imread(path_file) #all with the same shape image = imresize(image, (dim_size, dim_size)) data['image'].append(image) data['label'].append(character) return data def get_batches(num_batches, batch_size, verbose=False): #num max of samples num_samples = get_num_of_samples() #check number of batches with the maximum max_num_batches = num_samples//batch_size - 1 if verbose: print("Number of samples:{}".format(num_samples)) print("Batches:{} Size:{}".format(num_batches, batch_size)) assert num_batches <= max_num_batches, "Surpassed the maximum number of batches" for i in range(0, num_batches): init = i * batch_size if verbose: print("Batch-{} yielding images from {} to {}...".format(i, init, init+batch_size)) yield get_batch(init, batch_size) #testing generator batch_size = 500 for b in get_batches(10, batch_size, verbose=True): print("\t|- retrieved {} images".format(len(b['image']))) """ Explanation: Making batches (resized) End of explanation """ from sklearn import preprocessing #num characters num_characters = len(character_directories) #normalize def normalize(x): #we use the feature 
scaling to have all the batches #in the same space, that is (0,1) return (x - np.amin(x))/(np.amax(x) - np.amin(x)) #one-hot encode lb = preprocessing.LabelBinarizer() lb = lb.fit(character_directories) def one_hot(label): return lb.transform([label]) """ Explanation: Preprocessing data End of explanation """ num_batches = 40 batch_size = 500 import pickle import numpy as np cnt_images = 0 for cnt, b in enumerate(get_batches(num_batches, batch_size)): data = {'image':[], 'label':[]} for i in range( min(len(b['image']), batch_size) ): image = np.array( b['image'][i] ) label = np.array( b['label'][i] ) #label = label.reshape([-1,:]) if len(image.shape) == 3: data['image'].append(normalize(image)) data['label'].append(one_hot(label)[-1,:]) cnt_images += 1 else: print("Dim image < 3") with open("simpson_train_{}.pkl".format(cnt), 'wb') as file: pickle.dump(data, file, pickle.HIGHEST_PROTOCOL) print("Loaded {} train images and stored on disk".format(cnt_images)) #testing load from file import pickle with open('simpson_train_0.pkl', 'rb') as file: data = pickle.load(file) print("Example of onehot encoded:\n{}".format(data['label'][0])) print("Data shape: {}".format(data['image'][0].shape)) """ Explanation: Storing preprocessed batches on disk End of explanation """ import torch import torchvision device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # Assume that we are on a CUDA machine, then this should print a CUDA device: print(device) import torch.nn as nn import torch.nn.functional as F num_characters = 47 class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 32, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(32, 64, 5) self.fc1 = nn.Linear(64 * 34 * 34, num_characters) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) #print("shape: {}".format(x.size())) x = x.view(x.size(0), -1) x = self.fc1(x) return x net = Net() #move the neural network to the 
GPU if torch.cuda.device_count() > 1: print("Let's use", torch.cuda.device_count(), "GPUs!") # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs net = nn.DataParallel(net) net.to(device) import torch.optim as optim loss_fn = nn.CrossEntropyLoss() #buit-in softmax, we can use logits directly optimizer = optim.Adam(net.parameters()) import os import pickle from sklearn.model_selection import train_test_split def getDatasetsFromPickle(file): #print("Processing: {}".format(fname)) data = pickle.load(file) X_train, X_val, y_train, y_val = train_test_split(data['image'], data['label'], test_size=0.2) inputs_train, labels_train = torch.FloatTensor(X_train), torch.FloatTensor(y_train) inputs_val, labels_val = torch.FloatTensor(X_train), torch.FloatTensor(y_train) #permute image as (samples, x, y, channels) to (samples, channels, x, y) inputs_train = inputs_train.permute(0, 3, 1, 2) inputs_val = inputs_val.permute(0, 3, 1, 2) #move the inputs and labels to the GPU return inputs_train.to(device), labels_train.to(device), inputs_val.to(device), labels_val.to(device) stats = {'train_loss':[], 'val_loss':[], 'acc':[]} for epoch in range(3): # loop over the dataset multiple times for i in range(100): fname = "simpson_train_{}.pkl".format(i) if os.path.exists(fname): with open(fname, 'rb') as file: #retrieve the data inputs_train, labels_train, inputs_val, labels_val = getDatasetsFromPickle(file) # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs_train) #cross entropy loss doesn't accept onehot encoded targets # |-> use the index class instead lbls_no_onehot_encoded = torch.argmax(labels_train, dim=1) loss = loss_fn(outputs, lbls_no_onehot_encoded) loss.backward() optimizer.step() #statistics stats['train_loss'].append(loss.item()) with torch.no_grad(): outputs = net(inputs_val) label_val_classes = torch.argmax(labels_val, dim=1) output_classes = torch.argmax(outputs, dim=1) stats['val_loss'].append( 
loss_fn(outputs, label_val_classes).item() ) stats['acc'].append( (output_classes == label_val_classes).sum().item() / label_val_classes.size(0) ) #printouts if i % 20 == 19: printout = "Epoch: {} Batch: {} Training loss: {:.3f} Validation loss: {:.3f} Accuracy: {:.3f}" print(printout.format(epoch + 1, i + 1, stats['train_loss'][-1], stats['val_loss'][-1], stats['acc'][-1],)) else: break print('Finished Training') import matplotlib.pyplot as plt plt.plot(stats['train_loss'], label='Train Loss') plt.plot(stats['val_loss'], label='Validation Loss') plt.plot(stats['acc'], label='Accuracy') plt.legend() """ Explanation: NOTE Since here the data is already processed and saved as pickle files. Building the Network End of explanation """ import warnings warnings.filterwarnings('ignore') #select random image idx = random.randint(0, num_test_images) sample_file, sample_name = test_image_names[idx], test_image_names[idx].split('_')[:-1] path_file = os.path.join(test_root_path, sample_file) #read them test_image = normalize(imresize(imread(path_file), (dim_size, dim_size))) test_label_onehot = one_hot('_'.join(sample_name))[-1,:] #move to tensors test_image, test_label_onehot = torch.FloatTensor(test_image), torch.FloatTensor(test_label_onehot) #permute image as (samples, x, y, channels) to (samples, channels, x, y) test_image = test_image.permute(2, 0, 1) test_image.unsqueeze_(0) #move to GPU test_image, test_label_onehot = test_image.to(device), test_label_onehot.to(device) ## with torch.no_grad(): output = net(test_image) predicted_character = torch.argmax(output.data, 1) actual_character = torch.argmax(test_label_onehot) print("Right!!") if (predicted_character == actual_character) else print("Wrong..") #showing actual_name = ' '.join([s.capitalize() for s in sample_name]) print("Label: {}".format(actual_name)) pred_name = lb.inverse_transform(output.cpu().numpy()).item() #copy from cuda to cpu, then to numpy prediction = ' '.join([s.capitalize() for s in 
pred_name.split('_')]) print("Prediction: {}".format(prediction)) plt.figure(figsize=(3,3)) plt.imshow(test_image.permute(0, 2, 3, 1).squeeze()) plt.axis('off') plt.show() """ Explanation: Testing model End of explanation """
davidbrough1/pymks
notebooks/structure_md_2D.ipynb
mit
import pymks %matplotlib inline %load_ext autoreload %autoreload 2 import numpy as np import matplotlib.pyplot as plt from pymks_share import DataManager manager = DataManager('pymks.me.gatech.edu') X = manager.fetch_data('Molecular Dynamics') """ Explanation: Phase Transition in Molecular Dynamics Simulation Authors Alexander M. Lohse, Georgia Tech, MSE Ross J. Verploegh, Georgia Tech, ChBE Introduction Classical molecular mechanics (MM) is a powerful tool in materials engineering. Some examples where molecular dynamics (MD) has been applied include large simulations of protein folding for drug design, diffusion of gas molecules through organic/inorganic nanoporous materials, and phase separation of polymers. Sometimes, however, analyzing the trajectory data (e.g. atom types and atomic coordinates) with respect to some 1-D observable, such as geometric distances or density, does not capture all the important information. The tools within PyMKS allow for analysis of MD trajectory data in an unbiased way. This example uses MKSStructureAnalysis to look at loading-induced thermodynamic transition of a metal organic framework (ZIF-8) from low to high loading configurations. This example is particularly interesting because it is not clear what 1-D or 2-D reaction coordinate could be used to describe this transition. Molecular Dynamics Simulation of a Thermodynamic Phase Transition A molecular dynamics simulation was performed using LAMMPS Molecular Dynamics Simulator. The flexible ZIF-8 framework and nitrogen gas molecules were described using the force fields described in this manuscript [1]. For those familiar with the details of MD simulations, this simulation was performed in the NVT-ensemble with a 1.0 femtosecond timestep. The simulation was run for a total of 35 picoseconds with snapshots taken every 15 femtoseconds (total snapshots in trajectory: 2332). 
Data Generation: Mapping MD Data to a Grid For use within PyMKS, atomic data needs to be mapped to a 2-D or 3-D grid such that 2-point statistics can be calculated. For our example, only the hydrogen coordinates from the ZIF-8 framework are mapped to a fixed 70x70x70 voxel 3-D grid, with a resolution of 0.5 A. For ZIF-8, there are hydrogens in two types of chemical environments: methyl-group hydrogens and those on the imidazole ring. Each grid space is either assigned a 0 (void space=no hydrogen), a 1 (filled by a methyl hydrogen), or a 2 (filled by an imidazole hydrogen). The 3-D grid is then mapped into a 2-D array by summing in the z-direction for visual purposes only; the following analysis of this data could have been applied to the full 3-D data as well. The below cells just loads the necessary extensions to python for plotting (MatPlotLib) and numerical analysis (NumPy). The data is loaded from a Georgia Tech server. End of explanation """ print X.shape """ Explanation: As mentioned, there are 2332 snapshots at 15 femtosecond time intervals on a 70 x 70 pixel 2-D grid. End of explanation """ from pymks.tools import draw_microstructures sample_size = 10 X_examples = X[::sample_size] time = np.arange(X_examples.shape[0])[:] * sample_size * 15 draw_microstructures((X_examples[::40])) """ Explanation: We can draw what the structures look like with draw_microstructures. From left to right the structure is changing as the system goes through a phase transition. Do you notice a difference? End of explanation """ from pymks import MKSStructureAnalysis, PrimitiveBasis from sklearn.decomposition import RandomizedPCA prim_basis = PrimitiveBasis(2, domain=[0, 255]) analysis = MKSStructureAnalysis(basis=prim_basis,correlations=[(1, 1)], store_correlations=True, periodic_axes=[0,1]) analysis.correlations """ Explanation: Setup the model Now that we have microstructures we use MKSStructureAnalysis. 
We use the PrimitiveBasis because we will only have two states, voxel with Hydrogen or a voxel with no Hydrogen. Our data domain ranges from 0 to 255 corresponding to the grayscale (0 is black, 255 is white), as seen in the microstructures drawn above. Basically, we are reducing this from 255 (grayscale) states to a probability between 0 and 1. The statistics are then run on this basis we pass to MKSStructureAnalysis. As printed in the output, we are doing an autocorrelation from Hydrogen to Hydrogen (1,1) and a cross-correlation from Hydrogen to Void-Space (1,0). Change the correlations and rerun to see how the PCA plots further down change. Especially, note the increase in variance of PC1 with the addition of the (0,0) correlation.
End of explanation
"""

analysis.n_components = 40
analysis.fit(X_examples)

from pymks.tools import draw_component_variance

draw_component_variance(analysis.dimension_reducer.explained_variance_ratio_)

"""
Explanation: The graph above shows that over 92% of the statistics are captured with just 2 components.
Plotting components
We now can plot the principal components versus time. We see a distinct correlation between PC1 and PC2 and note that PC1 is correlated with time.
This likely indicates the time of the phase transition in the simulation. We could next try plotting PC1 vs. various geometric order parameters to see if PC1 (or any other principal component) relates. End of explanation """ draw_components_scatter([analysis.reduced_fit_data[:, :3]], ['X_example'], legend_outside=True) """ Explanation: Here we plot the first 3 principal components and note two general clusterings. This view captures roughly 95% of the variance in our data. End of explanation """ from pymks.tools import draw_correlations from pymks.tools import draw_components pcs = analysis.components_ draw_components(pcs[:4], fontsize=20) """ Explanation: Let's take a look at the two point statistics. We look at the images for principal components 1-4. Each image is the correlation for (1,1), or the Hydrogen probability. End of explanation """
louridas/rwa
content/notebooks/chapter_03.ipynb
bsd-2-clause
def create_pq():
    """Create and return a new, empty priority queue.

    The queue is a plain Python list that we will treat as a min-heap.
    """
    return []

"""
Explanation: Compressing
Chapter 3 of Real World Algorithms.
Panos Louridas<br />
Athens University of Economics and Business
Huffman Encoding
To implement Huffman encoding we need a priority queue. We will implement the priority queue as a min-heap. A min-heap is a complete binary tree in which the value of each node is less than or equal to the values of its children. The heap will be implemented as an array with the root of the heap at position 0. Then for each node, $i$, its left child is at position $2i + 1$ and its right child is at position $2i + 2$.
```
 6 / \ 11 8 / \ / \ 17 19 13 12 [6, 11, 8, 17, 19, 13, 12]
```
We create a priority queue by creating an empty list:
End of explanation
"""

def add_last(pq, c):
    """Append item c at the end of the priority queue (the bottom of the heap)."""
    pq.append(c)

"""
Explanation: To insert an element in the priority queue, we will first put it at the end of the corresponding list (so it will be at the bottom of the queue, or the equivalent min-heap). Then the element will start floating up until it reaches a level where it is smaller than its parent node. Here is the function that simply adds an element at the end of the priority queue.
End of explanation
"""

def root(pq):
    """Return the position of the root of the priority queue; it is always 0."""
    return 0

def set_root(pq, c):
    """Set the root of the priority queue to c; do nothing if the queue is empty."""
    if len(pq) != 0:
        pq[0] = c

"""
Explanation: It will also be useful to have some helper functions for the priority queue. We need a function that returns the position of the root, i.e., the first element in the queue for a non-empty queue. That is simple, it will always be position 0. Then, we need a function that assigns an element to the root of a non-empty queue. If the queue is empty, nothing will be done; otherwise, the item will be assigned to position 0.
End of explanation
"""

def get_data(pq, p):
    """Return the data stored at position p of the priority queue."""
    return pq[p]

"""
Explanation: We also need a function that will return the data stored at a specified position in the queue. In essence, that is just the element at that position in the list.
End of explanation
"""

def children(pq, p):
    """Return the positions of the children of the node at position p.

    A node that has children has either two of them, at 2*p + 1 and
    2*p + 2, or only one, at 2*p + 1, when that child is the last
    element of the queue.
    """
    if 2*p + 2 < len(pq):
        return [2*p + 1, 2*p + 2]
    else:
        return [2*p + 1]

"""
Explanation: As we have said, the children of node $c$ are at positions $2c + 1$ and $2c + 2$. So, here is the function that returns the children of a node in the queue. Note that we must be careful to return the right number of children. A node that has children has always two children, except if its only child is the last element of the queue; check the two following examples:
```
 6 6 / \ / \ 11 8 11 8 / \ / \ / \ / 17 19 13 12 17 19 13 [6, 11, 8, 17, 19, 13, 12] [6, 11, 8, 17, 19, 13]
```
End of explanation
"""

def parent(p):
    """Return the position of the parent of the node at position p."""
    return (p - 1) // 2

"""
Explanation: Conversely, the parent of a node at position $c$ is at position $\lfloor(c - 1)\rfloor / 2$.
End of explanation
"""

def exchange(pq, p1, p2):
    """Swap the elements at positions p1 and p2 of the priority queue."""
    pq[p1], pq[p2] = pq[p2], pq[p1]

"""
Explanation: To swap two elements of the queue we will use the following function:
End of explanation
"""

def insert_in_pq(pq, c):
    """Insert item c in the priority queue, preserving the min-heap property.

    The item is appended at the bottom of the heap and then floats up,
    swapping places with its parent for as long as it is smaller than it.
    """
    add_last(pq, c)
    i = len(pq) - 1
    while i != root(pq) and get_data(pq, i) < get_data(pq, parent(i)):
        p = parent(i)
        exchange(pq, i, p)
        i = p

"""
Explanation: With all this in place, here is how we can insert an element in the priority queue:
End of explanation
"""

min_queue = create_pq()
insert_in_pq(min_queue, 11)
print(min_queue)

"""
Explanation: We can follow the queue construction process step-by-step.
End of explanation
"""

insert_in_pq(min_queue, 17)
print(min_queue)

"""
Explanation: 11 / 17
End of explanation
"""

insert_in_pq(min_queue, 12)
print(min_queue)

"""
Explanation: 11 / \ 17 12
End of explanation
"""

insert_in_pq(min_queue, 8)
print(min_queue)

"""
Explanation: 8 / \ 11 12 / 17
End of explanation
"""

insert_in_pq(min_queue, 19)
print(min_queue)

"""
Explanation: 8 / \ 11 12 / \ 17 19
End of explanation
"""

insert_in_pq(min_queue, 13)
print(min_queue)

"""
Explanation: 8 / \ 11 12 / \ / 17 19 13
End of explanation
"""

insert_in_pq(min_queue, 6)
print(min_queue)

"""
Explanation: 6 / \ 11 8 / \ / \ 17 19 13 12 Now we must implement the extraction of elements from the queue. To do that, we must use a helper function that will extract the last element of the queue. That is equivalent to removing the last element of the corresponding list.
End of explanation
"""

def extract_last_from_pq(pq):
    """Remove and return the last element of the priority queue."""
    return pq.pop()

"""
Explanation: We will also need a function that will determine whether a given node has children. Remember that a node $p$ may have children at positions $2p + 1$ and $2p + 2$. Therefore we only need to check whether position $2p + 1$ is a valid position in the list.
End of explanation
"""

def has_children(pq, p):
    """Return True if the node at position p has at least one child."""
    return 2*p + 1 < len(pq)

"""
Explanation: With this, we can write the following element extraction function. Note that inside the while loop we want to get the minimum of the children. To do that we use Python's min() function, telling it to use the data of each child for the comparison.
End of explanation
"""

def extract_min_from_pq(pq):
    """Remove and return the minimum element of the priority queue.

    The root holds the minimum. We take it out, move the last element
    to the root, and let it sink down, swapping it with its smallest
    child for as long as that child is smaller than it.
    """
    c = pq[root(pq)]
    set_root(pq, extract_last_from_pq(pq))
    i = root(pq)
    while has_children(pq, i):
        # Use the data stored at each child as the comparison key
        # for finding the minimum.
        j = min(children(pq, i), key=lambda x: get_data(pq, x))
        if get_data(pq, i) < get_data(pq, j):
            return c
        exchange(pq, i, j)
        i = j
    return c

"""
Explanation: Let's see how it works in practice.
End of explanation
"""

m = extract_min_from_pq(min_queue)
print(m)
print(min_queue)

"""
Explanation: 8 / \ 11 12 / \ / 17 19 13
End of explanation
"""

m = extract_min_from_pq(min_queue)
print(m)
print(min_queue)

"""
Explanation: 11 / \ 13 12 / \ 17 19 With the priority queue in our hands we can implement Huffman encoding. We will adapt the example given in Rosetta Code. We start by reading the text that we want to compress and counting the frequencies of its characters. To do that we can use a dictionary.
End of explanation
"""

text = "This is the phrase that we want to compress."
symb2freq = {}
for ch in text:
    # If ch is not in the frequency table
    # we have to create an entry for it
    # initialized to zero.
    if ch not in symb2freq:
        symb2freq[ch] = 0
    # Add one to the number of times we have
    # seen ch.
    symb2freq[ch] += 1

import pprint
pprint.pprint(symb2freq)

"""
Explanation: Instead of using a bare-bones dictionary, it is more practical to use Python's defaultdict. This will add an entry with its value as zero if we try to access an element that does not already exist in the dictionary.
End of explanation
"""

from collections import defaultdict

symb2freq = defaultdict(int)
for ch in text:
    symb2freq[ch] += 1
pprint.pprint(symb2freq)

"""
Explanation: We can simplify things even further if we use Python's Counter. This will automatically count the occurrences of the characters in our text.
End of explanation
"""

from collections import Counter

symb2freq = Counter(text)
pprint.pprint(symb2freq)

"""
Explanation: Going now to the priority queue, it will contain elements of the following form:
[value, [character, encoding], ...
] The value is the frequency corresponding to the element of the queue. The ... above refers to repeated [character, encoding] instances; these are the characters represented by the specific element of the queue and the Huffman encodings of these characters. We will see that this will allow us to create the Huffman encoding while we are building the encoding tree, without needing to traverse it afterwards. We start by putting the characters and their frequencies in the priority queue. End of explanation """ def create_huffman_code(pq): while len(pq) > 1: # Extract the two minimum items from the priority queue. x = extract_min_from_pq(pq) y = extract_min_from_pq(pq) # Get all the [character, encoding] items associated with x; # as x is the left child of the new node, prepend '0' # to their encodings. for pair in x[1:]: pair[1] = '0' + pair[1] # Do the same for y; as y is the right child of the # new node, prepend '1' to their encodings. for pair in y[1:]: pair[1] = '1' + pair[1] # Insert a new node with the sum of the occurrences # of the two extracted nodes and the updated # [character, encoding] sequences. insert_in_pq(pq, [x[0] + y[0]] + x[1:] + y[1:]) return extract_min_from_pq(pq) hc = create_huffman_code(pq) print("Huffman Code:") pprint.pprint(hc) """ Explanation: As you can see, right now we have inserted into the queue the individual characters of the text with their occurrences. None of them has any Huffman encoding, yet. To create the Huffman encodings, we work as follows. Note that we do not really need to create a Huffman tree explicitly at all. When we take out two nodes from the queue and create a new node with the two nodes as children, we only need to add a 0 to the encoding of the left child and a 1 to the encoding of the right child. 
So, when we take out the two nodes: [1, ['.', '']] [1, ['T', '']] We will get the node: [2, ['.', '0'], ['T', '1']] Then, when we take out the two nodes: [2, ['.', '0'], ['T', '1']] [1, ['n', '']] We will get the node: [3, ['n', '0'], ['.', '10'], ['T', '11']] That explains why we had the priority queue have elements of the form: [value, [character, encoding], [character, encoding], ... ] End of explanation """ hc_table = { character: encoding for [character, encoding] in hc[1:]} pprint.pprint(hc_table) """ Explanation: As we can see, the Huffman encoding was returned in a list. It is more practical to enter it into a dictionary, where the keys are the characters and the values are their encodings. We will call this Huffman coding table. End of explanation """ huffman_encoding = [ hc_table[c] for c in text ] print(text) print("Original contents:") bit_representation = [ f'{ord(c):b}' for c in text ] print(bit_representation) print("Compressed contents:") print(huffman_encoding) """ Explanation: Now we can encode our original text. To see what is happening, we take the original text and we print the binary representation of each character. The Python ord() function returns the Python encoding for a character, which we convert to binary format. Then we print out the compressed text, so you can see the savings: End of explanation """ print(len(text)) """ Explanation: Huffman Compression and Decompression for Files Careful, now we go into a bit more advanced stuff, on how we would actually store compressed data and how we will the decompress them. Notice that the compressed contents we have been displaying are not really compressed. 
Let us check how long our original text was: End of explanation """ huffman_string = ''.join(huffman_encoding) print(huffman_string) print(len(huffman_string)) """ Explanation: It is 44 characters long; whereas the compressed text is: End of explanation """ for i in range(0, len(huffman_string), 8): chunk = huffman_string[i:i+8] byte_chunk = int(chunk, 2).to_bytes(1, byteorder='big') print(f'{chunk} {byte_chunk}') """ Explanation: This happens because we store 0 and 1 as characters, not as individual bits. In reality, we want to store them using single bits. Then we will achieve the promised savings. To convert them to bits, we'll break the Huffman string to chunks of eight, and we will convert each one of them to two bytes, equal to to exactly eight bits. If the Huffman string is not divisible by eight, we assume that the last chunk is padded to the left with zeros. To convert a bit string to bytes, we use the function int(chunk, 2), which will treat chunk as a string representing a binary (base 2) number. We convert the result to bytes, using to_bytes(1, byteorder='big'): we need one byte to represent the integer we got. The to_bytes() function returns an array of bytes, here it returns an array of one. In general, though, it could be longer. The second argument specifies how bytes will be put into the array. By giving byteorder='big' we specify that and we want the bytes to follow the big endian representation, i.e., if we put two bytes into the array, the most significant of the two bytes is the first one and it will be put first in the array, followed by the second. For example, if we have the two bytes 0xABCD in hexadecimal we will get an array of two bytes, the first element of the array will contain 0xAB and the second 0xCD. If we use to_bytes(2, byteorder='little'), then we get little endian representation, meaning that the hexadecimal numberABCD would be stored as an array with the first element being CD and the second AB. 
Again, here it does not matter as we get only one byte, but we do have to specify it. End of explanation """ import pickle def huffman_compress(original_file, output_file): pq = create_pq() # First pass: count character occurrences. symb2freq = Counter() with open(original_file) as uncompressed_file: for line in uncompressed_file: symb2freq += Counter(line) # Put the occurrences in a priority queue. pq = create_pq() for key, value in symb2freq.items(): insert_in_pq(pq, [value, [key, '']]) # Create the Huffman code. hc = create_huffman_code(pq) # Turn the code to a dictionary for easier lookup. hc_table = { character: encoding for [character, encoding] in hc[1:]} # Second pass: we'll read again the uncompressed file, # we'll compress the contents and save them to the # compressed file as we go. with open(original_file) as uncompressed_file, \ open(output_file, 'wb') as compressed_file: # First save the Huffman encoding... pickle.dump(hc_table, compressed_file) # then save the total number of characters in the original file. pickle.dump(sum(symb2freq.values()), compressed_file) # Use a buffer in which we will be adding the encoded characters; # when the buffer has 8 bits or more we will output a byte and # keep the remaining bits. buffer = '' for line in uncompressed_file: for c in line: # For each character, add the encoding to the buffer. buffer += hc_table[c] # Have we got enough stuff in the buffer to output a byte? while len(buffer) >= 8: # Yes, output a byte. byte = int(buffer[:8], base=2).to_bytes(1, byteorder='big') compressed_file.write(byte) # Keep any remaining stuff in the buffer; that will go out # with the next byte. buffer = buffer[8:] if len(buffer) > 0: # If we have still remaining stuff, it means that part of the last # character encoding was put in the previous byte, and part of it # will go in the last byte; we'll pad zeroes to the end of it. 
buffer = buffer.ljust(8, '0') byte = int(buffer[:8], base=2).to_bytes(1, byteorder='big') compressed_file.write(byte) """ Explanation: Instead of printing it on screen, we want to save the compressed data. We also need to save the Huffman coding table, otherwise there is no way we will be able to decompress them. So we will create a file, in which we will save the Huffman coding table first, followed by the total number of uncompressed characters and the bytes of the Huffman encoding of our data. We'll need the total number of uncompressed characters to ensure correct decompression. We'll save these two (the table and the number of uncompressed characters) with Python's pickle facility. For each character in the file, we'll get its Huffman encoding. We need to pack the encodings into bytes in order to save them; to do that, we will pack the Huffman encodings into a buffer variable, which we'll be breaking at 8 characters to create and output a byte. Care must be taken in the last byte of the file, which may need to be padded to the right with zeroes, if the sum of the lengths of all Huffman encodings of the characters is not a multiple of 8. End of explanation """ def huffman_decompress(input_file, output_file): with open(input_file, 'rb') as compressed_file,\ open(output_file, 'w') as decompressed_file: # Read the Huffman table. hc_table = pickle.load(compressed_file) # Read the total number of uncompressed characters. num_chars = pickle.load(compressed_file) # Construct an inverse, Huffman decoding table. hc_decoding_table = { v: k for (k, v) in hc_table.items() } # Set a counter for the decompressed characters. num_decompressed = 0 # Keep the Huffman code that we want to decode. encoding = '' # Read the file byte-by-byte. byte = compressed_file.read(1) while byte: # For each byte, get its bit representation. bit_repr = format(int.from_bytes(byte, byteorder='big'), '08b') # Then read it bit-by-bit, extending the current encoding # that we are trying to decode. 
for bit in bit_repr: encoding += bit # Is this a valid Huffman encoding? if encoding in hc_decoding_table: # Yes, decompress it. decompressed_file.write(hc_decoding_table[encoding]) num_decompressed += 1 # If we have decompressed the expected amount of # characters, we are done; any leftover is just the # padding of the last byte of the file. if num_decompressed == num_chars: break encoding = '' byte = compressed_file.read(1) """ Explanation: Decompressing a file follows the reverse logic. We read the Huffman coding table and reverse it, in order to use it for decoding. Then we read the number of expected decompressed characters. We proceed with reading the file byte-by-byte, converting the bytes to bits, and decoding them as soon as we find a valid Huffman code for a sequence of bits. End of explanation """ # %load huffman.py from collections import Counter import pickle import argparse def create_pq(): return [] def add_last(pq, c): pq.append(c) def root(pq): return 0 def set_root(pq, c): if len(pq) != 0: pq[0] = c def get_data(pq, p): return pq[p] def children(pq, p): if 2*p + 2 < len(pq): return [2*p + 1, 2*p + 2] else: return [2*p + 1] def parent(p): return (p - 1) // 2 def exchange(pq, p1, p2): pq[p1], pq[p2] = pq[p2], pq[p1] def insert_in_pq(pq, c): add_last(pq, c) i = len(pq) - 1 while i != root(pq) and get_data(pq, i) < get_data(pq, parent(i)): p = parent(i) exchange(pq, i, p) i = p def extract_last_from_pq(pq): return pq.pop() def has_children(pq, p): return 2*p + 1 < len(pq) def extract_min_from_pq(pq): c = pq[root(pq)] set_root(pq, extract_last_from_pq(pq)) i = root(pq) while has_children(pq, i): # Use the data stored at each child as the comparison key # for finding the minimum. j = min(children(pq, i), key=lambda x: get_data(pq, x)) if get_data(pq, i) < get_data(pq, j): return c exchange(pq, i, j) i = j return c def create_huffman_code(pq): while len(pq) > 1: # Extract the two minimum items from the priority queue. 
x = extract_min_from_pq(pq) y = extract_min_from_pq(pq) # Get all the [character, encoding] items associated with x; # as x is the left child of the new node, prepend '0' # to their encodings. for pair in x[1:]: pair[1] = '0' + pair[1] # Do the same for y; as y is the right child of the # new node, prepend '1' to their encodings. for pair in y[1:]: pair[1] = '1' + pair[1] # Insert a new node with the sum of the occurrences # of the two extracted nodes and the updated # [character, encoding] sequences. insert_in_pq(pq, [x[0] + y[0]] + x[1:] + y[1:]) return extract_min_from_pq(pq) def huffman_compress(input_file, output_file): pq = create_pq() # First pass: count character occurrences. symb2freq = Counter() with open(input_file) as uncompressed_file: for line in uncompressed_file: symb2freq += Counter(line) # Put the occurrences in a priority queue. pq = create_pq() for key, value in symb2freq.items(): insert_in_pq(pq, [value, [key, '']]) # Create the Huffman code. hc = create_huffman_code(pq) # Turn the code to a dictionary for easier lookup. hc_table = { character: encoding for [character, encoding] in hc[1:]} # Second pass: we'll read again the uncompressed file, # we'll compress the contents and save them to the # compressed file as we go. with open(input_file) as uncompressed_file, \ open(output_file, 'wb') as compressed_file: # First save the Huffman encoding. pickle.dump(hc_table, compressed_file) # Then save the total number of characters in the input file. pickle.dump(sum(symb2freq.values()), compressed_file) # Use a buffer in which we will be adding the encoded characters; # when the buffer has 8 bits or more we will output a byte and # keep the remaining bits. buffer = '' for line in uncompressed_file: for c in line: # For each character, add the encoding to the buffer. buffer += hc_table[c] # Have we got enough stuff in the buffer to output a byte? 
while len(buffer) >= 8: # Yes, output a byte byte = int(buffer[:8], base=2).to_bytes(1, byteorder='big') compressed_file.write(byte) # Keep any remaining stuff in the buffer; that will go out # with the next byte. buffer = buffer[8:] if len(buffer) > 0: # If we have still remaining stuff, it means that part of the last # character encoding was put in the previous byte, and part of it # will go in the last byte; we'll pad zeroes to the end of it. buffer = buffer.ljust(8, '0') byte = int(buffer[:8], base=2).to_bytes(1, byteorder='big') compressed_file.write(byte) def huffman_decompress(input_file, output_file): with open(input_file, 'rb') as compressed_file,\ open(output_file, 'w') as decompressed_file: # Read the Huffman table. hc_table = pickle.load(compressed_file) # Read the total number of uncompressed characters. num_chars = pickle.load(compressed_file) # Construct an inverse, Huffman decoding table. hc_decoding_table = { v: k for (k, v) in hc_table.items() } # Set a counter for the decompressed characters. num_decompressed = 0 # Keep the Huffman code that we want to decode. encoding = '' # Read the file byte-by-byte. byte = compressed_file.read(1) while byte: # For each byte, get its bit representation. bit_repr = format(int.from_bytes(byte, byteorder='big'), '08b') # Then read it bit-by-bit, extending the current encoding # that we are trying to decode. for bit in bit_repr: encoding += bit # Is this a valid Huffman encoding? if encoding in hc_decoding_table: # Yes, decompress it. decompressed_file.write(hc_decoding_table[encoding]) num_decompressed += 1 # If we have decompressed the expected amount of # characters, we are done; any leftover is just the # padding of the last byte of the file. 
import io


def lzw_compress(message, nb, n):
    """
    Perform LZW compression of an in-memory string.

    Parameters
    ----------
    message : str
        the message to compress; every character must belong to the
        alphabet, i.e. satisfy ord(c) < n
    nb : int
        the number of bits used for each encoding
    n : int
        the size of the alphabet

    Returns
    -------
    compressed : list
        The encoded message as a list of integer codes, each fitting
        in nb bits; the empty list for an empty message
    """
    compressed = []        # list of emitted codes
    max_code = 2**nb - 1   # largest code representable in nb bits
    # Initialize the table with one encoding per alphabet character.
    table = {chr(i): i for i in range(n)}
    code = n               # encoding for the next unencoded ngram
    w = ""                 # current ngram
    for c in message:
        wc = w + c         # candidate extended ngram
        if wc in table:
            # Known ngram: keep extending it.
            w = wc
        else:
            # Unknown ngram: emit the code of the known prefix...
            compressed.append(table[w])
            # ...and restart the ngram from the current character.
            w = c
            # Record the new ngram if the table still has room.
            if code <= max_code:
                table[wc] = code
                code += 1
    # Emit the code of whatever ngram is pending at end of message.
    if w:
        compressed.append(table[w])
    return compressed


def lzw_decompress(compressed, nb, n):
    """
    Perform LZW decompression of a list of codes.

    Parameters
    ----------
    compressed : list
        the message to decompress (list of integer codes); it is NOT
        modified (the previous implementation destructively popped its
        first element, an O(n) operation that surprised callers)
    nb : int
        the number of bits used for each encoding
    n : int
        the size of the alphabet

    Returns
    -------
    result : str
        The decompressed message; the empty string for empty input
        (the previous implementation raised IndexError)
    """
    max_code = 2**nb - 1   # largest code representable in nb bits
    # Initialize the table with one character per alphabet code.
    table = {i: chr(i) for i in range(n)}
    code = n               # encoding for the next unencoded ngram
    result = io.StringIO()

    # Iterate instead of pop(0): O(1) per code and non-mutating.
    codes = iter(compressed)
    try:
        c = next(codes)
    except StopIteration:
        return ""
    # Output the first character.
    v = table[c]
    result.write(v)
    pv = v                 # previously decoded ngram

    for c in codes:
        if c in table:
            v = table[c]
        else:
            # cScSc corner case: the code was created by the compressor
            # in the very step that emitted it, so it must equal the
            # previous ngram plus that ngram's first character.
            v = pv + pv[0]
        result.write(v)
        # Mirror the compressor: record the new ngram if room remains.
        if code <= max_code:
            table[code] = pv + v[0]
            code += 1
        pv = v
    return result.getvalue()
# %load lzw.py
import argparse


def lzw_compress(input_file, output_file, nb, n):
    """
    LZW-compress a text file.

    Parameters
    ----------
    input_file : str
        path of the (text) file to compress
    output_file : str
        path of the compressed output; each code is written as a
        fixed-width big-endian integer of ceil(nb/8) bytes
    nb : int
        the number of bits used for each encoding
    n : int
        the size of the alphabet
    """
    max_code = 2**nb - 1   # largest code representable in nb bits
    # Initialize table with encodings for each character in the alphabet.
    table = {chr(i): i for i in range(n)}
    code = n               # encoding for the next unencoded ngram
    # Bytes needed to store nb bits: nb // 8 rounded up (add 7 first).
    num_bytes = (nb + 7) // 8
    w = ""                 # current ngram
    # Context managers guarantee the files are closed even if an
    # exception is raised (the previous version leaked the handles).
    with open(input_file) as uncompressed_file, \
            open(output_file, 'wb') as compressed_file:
        for line in uncompressed_file:
            for c in line:
                wc = w + c  # candidate extended ngram
                if wc in table:
                    # Known ngram: keep extending it.
                    w = wc
                else:
                    # Unknown ngram: emit the code of the known prefix.
                    compressed_file.write(
                        table[w].to_bytes(num_bytes, byteorder='big'))
                    # Restart the ngram from the current character.
                    w = c
                    # Record the new ngram if the table still has room.
                    if code <= max_code:
                        table[wc] = code
                        code += 1
        # Emit the code of whatever ngram is pending at end of input.
        if w:
            compressed_file.write(table[w].to_bytes(num_bytes, byteorder='big'))


def lzw_decompress(input_file, output_file, nb, n):
    """
    Decompress a file produced by lzw_compress.

    Parameters
    ----------
    input_file : str
        path of the compressed file (fixed-width big-endian codes)
    output_file : str
        path of the decompressed (text) output
    nb : int
        the number of bits used for each encoding
    n : int
        the size of the alphabet
    """
    max_code = 2**nb - 1   # largest code representable in nb bits
    # Initialize the decoding table with reverse encodings for each
    # character in the alphabet.
    table = {i: chr(i) for i in range(n)}
    code = n               # encoding for the next unencoded ngram
    # Bytes needed to store nb bits: nb // 8 rounded up (add 7 first).
    num_bytes = (nb + 7) // 8
    with open(input_file, 'rb') as compressed_file, \
            open(output_file, 'w') as decompressed_file:
        # Read the first code; an empty input file yields an empty
        # output file. (The previous version decoded the empty read as
        # int.from_bytes(b'', 'big') == 0 and wrote a spurious chr(0).)
        chunk = compressed_file.read(num_bytes)
        if not chunk:
            return
        c = int.from_bytes(chunk, byteorder='big')
        v = table[c]
        decompressed_file.write(v)
        pv = v             # previously decoded ngram

        chunk = compressed_file.read(num_bytes)
        while chunk:
            c = int.from_bytes(chunk, byteorder='big')
            if c in table:
                v = table[c]
            else:
                # cScSc corner case: the code was created by the
                # compressor in the very step that emitted it, so it is
                # the previous ngram plus its own first character.
                v = pv + pv[0]
            decompressed_file.write(v)
            # If there is room in the decoding table, add the mapping.
            if code <= max_code:
                table[code] = pv + v[0]
                code += 1
            pv = v
            chunk = compressed_file.read(num_bytes)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=
                                     "LZW compression/decompression")
    parser.add_argument('input_file', help='Input file')
    parser.add_argument('output_file', help='Output file')
    parser.add_argument("-d", "--decompress", help="decompress",
                        default=False, action="store_true")
    parser.add_argument("-n", "--nb",
                        help="number of bits of each table entry",
                        type=int, default=16)
    parser.add_argument("-s", "--size", help="size of alphabet",
                        type=int, default=2**8)
    args = parser.parse_args()

    if (args.decompress):
        lzw_decompress(args.input_file, args.output_file, args.nb, args.size)
    else:
        lzw_compress(args.input_file, args.output_file, args.nb, args.size)
The usage information is as follows: ``` usage: lzw.py [-h] [-d] [-n NB] [-s SIZE] input_file output_file LZW compression/decompression positional arguments: input_file Input file output_file Output file optional arguments: -h, --help show this help message and exit -d, --decompress decompress -n NB, --nb NB number of bits of each table entry -s SIZE, --size SIZE size of alphabet ``` The default values are 16 bits for each encoding and an alphabet size of 256. End of explanation """
kitu2007/dl_class
embeddings/Skip-Gram-word2vec.ipynb
mit
import time import numpy as np import tensorflow as tf import utils """ Explanation: Skip-gram word2vec In this notebook, I'll lead you through using TensorFlow to implement the word2vec algorithm using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like translations. Readings Here are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material. A really good conceptual overview of word2vec from Chris McCormick First word2vec paper from Mikolov et al. NIPS paper with improvements for word2vec also from Mikolov et al. An implementation of word2vec from Thushan Ganegedara TensorFlow word2vec tutorial Word embeddings When you're dealing with language and words, you end up with tens of thousands of classes to predict, one for each word. Trying to one-hot encode these words is massively inefficient, you'll have one element set to 1 and the other 50,000 set to 0. The word2vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words. Words that show up in similar contexts, such as "black", "white", and "red" will have vectors near each other. There are two architectures for implementing word2vec, CBOW (Continuous Bag-Of-Words) and Skip-gram. <img src="assets/word2vec_architectures.png" width="500"> In this implementation, we'll be using the skip-gram architecture because it performs better than CBOW. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts. First up, importing packages. 
End of explanation """ from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm import zipfile dataset_folder_path = 'data' dataset_filename = 'text8.zip' dataset_name = 'Text8 Dataset' class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile(dataset_filename): with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar: urlretrieve( 'http://mattmahoney.net/dc/text8.zip', dataset_filename, pbar.hook) if not isdir(dataset_folder_path): with zipfile.ZipFile(dataset_filename) as zip_ref: zip_ref.extractall(dataset_folder_path) with open('data/text8') as f: text = f.read() """ Explanation: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space. End of explanation """ words = utils.preprocess(text) print(words[:30]) print("Total words: {}".format(len(words))) print("Unique words: {}".format(len(set(words)))) """ Explanation: Preprocessing Here I'm fixing up the text to make training easier. This comes from the utils module I wrote. The preprocess function coverts any punctuation into tokens, so a period is changed to &lt;PERIOD&gt;. In this data set, there aren't any periods, but it will help in other NLP problems. I'm also removing all words that show up five or fewer times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. If you want to write your own functions for this stuff, go for it. 
# Build the word <-> integer lookup tables and encode the whole corpus.
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]

## Your code here
from collections import Counter
import random

# Subsampling (Mikolov et al.): discard word w with probability
# P(w) = 1 - sqrt(t / f(w)), where f(w) is w's relative frequency
# and t is the threshold parameter.
cc = Counter(int_words)
tot_count = len(int_words)
thres = 1e-5
p_thres = {x: 1 - np.sqrt(thres / (y / tot_count)) for x, y in cc.items()}
# Keep a word when a uniform draw exceeds its drop probability.
train_words = [word for word in int_words if p_thres[word] < random.random()]

# Sanity check: compare the first few subsampled words with the original
# sequence. (Bug fix: the original referenced the undefined name
# `train_data`; the subsampled list is `train_words`.)
dd2 = [int_to_vocab[d] for d in train_words[0:30]]
dd1 = [int_to_vocab[d] for d in int_words[0:30]]
dd2, dd1
def get_target(words, idx, window_size=5):
    '''
    Get a list of words in a window around an index.

    A window radius R is drawn uniformly from [1, window_size], then the
    R words before and the R words after position idx are returned,
    de-duplicated. (Bug fix: the original called
    random.randint(1, window_size + 1); random.randint includes BOTH
    endpoints, so R could be window_size + 1, exceeding the window C
    described in Mikolov et al.)

    Parameters
    ----------
    words : list
        the (encoded) corpus or batch
    idx : int
        position of the input word
    window_size : int
        maximum number of context words to take on each side

    Returns
    -------
    list
        the unique context words around idx
    '''
    # random.randint(a, b) returns a value in [a, b] inclusive.
    R = random.randint(1, window_size)
    start = idx - R if (idx - R) > 0 else 0
    stop = idx + R
    # Words before idx plus words after it; the set removes duplicates.
    target = set(words[start:idx] + words[idx + 1:stop + 1])
    return list(target)


def get_batches(words, batch_size, window_size=5):
    '''
    Create a generator of word batches as a tuple (inputs, targets).

    Each yielded pair contains one entry per (word, context-word)
    combination, so an input word is repeated once for every target
    drawn from its window. Only full batches are produced.
    '''
    n_batches = len(words) // batch_size
    # Drop the trailing partial batch.
    words = words[:n_batches * batch_size]

    for idx in range(0, len(words), batch_size):
        x, y = [], []
        batch = words[idx:idx + batch_size]
        for ii in range(len(batch)):
            batch_x = batch[ii]
            batch_y = get_target(batch, ii, window_size)
            y.extend(batch_y)
            x.extend([batch_x] * len(batch_y))
        yield x, y
# Smoke-test the batch generator on the first 120 subsampled words.
# (Bug fix: the original referenced the undefined name `train_data`;
# the subsampled corpus is `train_words`.)
bb = get_batches(train_words[:120], 10, 5)

"""
aa = [1,2]
aa.extend([3]) # has to be iterable inside the extend
aa.append(4)
aa.append([5])
aa
"""

# Print one (inputs, targets) batch to eyeball the pairing.
for x, y in bb:
    print(x)
    print(y)
    break

# a1, b1= next(bb)
# np.vstack((a1,b1)).T

train_graph = tf.Graph()
with train_graph.as_default():
    # Integer word IDs; the batch size varies, so the dimension is None.
    inputs = tf.placeholder(tf.int32, shape=[None], name='inputs')
    # Targets kept 2-D (a column is fed at train time), as required by
    # tf.nn.sampled_softmax_loss.
    labels = tf.placeholder(tf.int32, shape=[None, None], name="labels")
# Vocabulary size and dimensionality of the learned word vectors.
n_vocab = len(int_to_vocab)
n_embedding = 200 # Number of embedding features
with train_graph.as_default():
    # Embedding matrix: one n_embedding-dimensional row per vocabulary
    # word, initialized uniformly in [-1, 1).
    embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding),-1.0,1.0))# create embedding weight matrix here
    # Row lookup replaces the one-hot matmul: returns the hidden-layer
    # output for each input word ID.
    embed = tf.nn.embedding_lookup(embedding, inputs) # use tf.nn.embedding_lookup to get the hidden layer output
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
    # Output weights are (n_vocab, n_embedding): sampled_softmax_loss
    # expects weights of shape [num_classes, dim].
    softmax_w = tf.Variable(tf.truncated_normal((n_vocab,n_embedding),stddev=0.1))# create softmax weight matrix here
    softmax_b = tf.Variable(tf.zeros(n_vocab))# create softmax biases here

    # Calculate the loss using negative sampling: only n_sampled negative
    # classes are evaluated per step instead of the full softmax.
    loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, embed, labels, n_sampled, n_vocab)

    cost = tf.reduce_mean(loss)
    optimizer = tf.train.AdamOptimizer().minimize(cost)

with train_graph.as_default():
    ## From Thushan Ganegedara's implementation
    valid_size = 16 # Random set of words to evaluate similarity on.
    valid_window = 100
    # pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
    valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
    valid_examples = np.append(valid_examples,
                               random.sample(range(1000,1000+valid_window), valid_size//2))

    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # We use the cosine distance: L2-normalize the embedding rows so a
    # matmul against the normalized table yields cosine similarities.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
    normalized_embedding = embedding / norm
    valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
    similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))

# If the checkpoints directory doesn't exist:
!mkdir checkpoints

epochs = 10
batch_size = 1000
window_size = 10

with train_graph.as_default():
    saver = tf.train.Saver()

with tf.Session(graph=train_graph) as sess:
    iteration = 1
    loss = 0
    sess.run(tf.global_variables_initializer())

    for e in range(1, epochs+1):
        batches = get_batches(train_words, batch_size, window_size)
        start = time.time()
        for x, y in batches:
            # labels must be fed as a column vector (see placeholder).
            feed = {inputs: x,
                    labels: np.array(y)[:, None]}
            train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)

            loss += train_loss

            # Report the running average loss every 100 batches.
            if iteration % 100 == 0:
                end = time.time()
                print("Epoch {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Avg. Training loss: {:.4f}".format(loss/100),
                      "{:.4f} sec/batch".format((end-start)/100))
                loss = 0
                start = time.time()

            if iteration % 1000 == 0:
                ## From Thushan Ganegedara's implementation
                # note that this is expensive (~20% slowdown if computed every 500 steps)
                sim = similarity.eval()
                for i in range(valid_size):
                    valid_word = int_to_vocab[valid_examples[i]]
                    top_k = 8 # number of nearest neighbors
                    # argsort of negated similarities = descending order;
                    # skip position 0, which is the word itself.
                    nearest = (-sim[i, :]).argsort()[1:top_k+1]
                    log = 'Nearest to %s:' % valid_word
                    for k in range(top_k):
                        close_word = int_to_vocab[nearest[k]]
                        log = '%s %s,' % (log, close_word)
                    print(log)

            iteration += 1
    save_path = saver.save(sess, "checkpoints/text8.ckpt")
    embed_mat = sess.run(normalized_embedding)

# Restore the trained network from the latest checkpoint if needed.
with train_graph.as_default():
    saver = tf.train.Saver()

with tf.Session(graph=train_graph) as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    embed_mat = sess.run(embedding)

# Project the learned vectors to 2-D with t-SNE for visualization.
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])

# NOTE(review): this overwrites embed_tsne (computed above from the
# trained embeddings) with a projection of random data — presumably a
# leftover scratch cell; confirm before relying on the plot below.
embed_tsne = tsne.fit_transform(np.random.random((63000,200)))
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
    plt.scatter(*embed_tsne[idx, :], color='steelblue')
    plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
InsightSoftwareConsortium/SimpleITK-Notebooks
Python/03_Image_Details.ipynb
apache-2.0
import SimpleITK as sitk # If the environment variable SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT is set, this will override the ReadImage # function so that it also resamples the image to a smaller size (testing environment is memory constrained). %run setup_for_testing %matplotlib inline import matplotlib.pyplot as plt import numpy as np from ipywidgets import interact, fixed import os OUTPUT_DIR = "Output" # Utility method that either downloads data from the Girder repository or # if already downloaded returns the file name for reading from disk (cached data). %run update_path_to_download_script from downloaddata import fetch_data as fdata """ Explanation: SimpleITK Images, They're Physical Objects <a href="https://mybinder.org/v2/gh/InsightSoftwareConsortium/SimpleITK-Notebooks/master?filepath=Python%2F03_Image_Details.ipynb"><img style="float: right;" src="https://mybinder.org/badge_logo.svg"></a> SimpleITK conventions: * Image access is in x,y,z order, image.GetPixel(x,y,z) or image[x,y,z], with zero based indexing. * If the output of an ITK filter has non-zero starting index, then the index will be set to 0, and the origin adjusted accordingly. The unique feature of SimpleITK (derived from ITK) as a toolkit for image manipulation and analysis is that it views <b>images as physical objects occupying a bounded region in physical space</b>. In addition images can have different spacing between pixels along each axis, and the axes are not necessarily orthogonal. The following figure illustrates these concepts. <img src="ImageOriginAndSpacing.png" style="width:700px"/><br><br> Pixel Types The pixel type is represented as an enumerated type. The following is a table of the enumerated list. 
<table> <tr><td>sitkUInt8</td><td>Unsigned 8 bit integer</td></tr> <tr><td>sitkInt8</td><td>Signed 8 bit integer</td></tr> <tr><td>sitkUInt16</td><td>Unsigned 16 bit integer</td></tr> <tr><td>sitkInt16</td><td>Signed 16 bit integer</td></tr> <tr><td>sitkUInt32</td><td>Unsigned 32 bit integer</td></tr> <tr><td>sitkInt32</td><td>Signed 32 bit integer</td></tr> <tr><td>sitkUInt64</td><td>Unsigned 64 bit integer</td></tr> <tr><td>sitkInt64</td><td>Signed 64 bit integer</td></tr> <tr><td>sitkFloat32</td><td>32 bit float</td></tr> <tr><td>sitkFloat64</td><td>64 bit float</td></tr> <tr><td>sitkComplexFloat32</td><td>complex number of 32 bit float</td></tr> <tr><td>sitkComplexFloat64</td><td>complex number of 64 bit float</td></tr> <tr><td>sitkVectorUInt8</td><td>Multi-component of unsigned 8 bit integer</td></tr> <tr><td>sitkVectorInt8</td><td>Multi-component of signed 8 bit integer</td></tr> <tr><td>sitkVectorUInt16</td><td>Multi-component of unsigned 16 bit integer</td></tr> <tr><td>sitkVectorInt16</td><td>Multi-component of signed 16 bit integer</td></tr> <tr><td>sitkVectorUInt32</td><td>Multi-component of unsigned 32 bit integer</td></tr> <tr><td>sitkVectorInt32</td><td>Multi-component of signed 32 bit integer</td></tr> <tr><td>sitkVectorUInt64</td><td>Multi-component of unsigned 64 bit integer</td></tr> <tr><td>sitkVectorInt64</td><td>Multi-component of signed 64 bit integer</td></tr> <tr><td>sitkVectorFloat32</td><td>Multi-component of 32 bit float</td></tr> <tr><td>sitkVectorFloat64</td><td>Multi-component of 64 bit float</td></tr> <tr><td>sitkLabelUInt8</td><td>RLE label of unsigned 8 bit integers</td></tr> <tr><td>sitkLabelUInt16</td><td>RLE label of unsigned 16 bit integers</td></tr> <tr><td>sitkLabelUInt32</td><td>RLE label of unsigned 32 bit integers</td></tr> <tr><td>sitkLabelUInt64</td><td>RLE label of unsigned 64 bit integers</td></tr> </table> There is also sitkUnknown, which is used for undefined or erroneous pixel ID's. It has a value of -1. 
The 64-bit integer types are not available on all distributions. When not available the value is sitkUnknown. End of explanation """ logo = sitk.ReadImage(fdata("SimpleITK.jpg")) plt.imshow(sitk.GetArrayViewFromImage(logo)) plt.axis("off"); """ Explanation: Load your first image and display it End of explanation """ image_3D = sitk.Image(256, 128, 64, sitk.sitkInt16) image_2D = sitk.Image(64, 64, sitk.sitkFloat32) image_2D = sitk.Image([32, 32], sitk.sitkUInt32) image_RGB = sitk.Image([128, 64], sitk.sitkVectorUInt8, 3) """ Explanation: Image Construction There are a variety of ways to create an image. The following components are required for a complete definition of an image: <ol> <li>Pixel type [fixed on creation, no default]: unsigned 32 bit integer, sitkVectorUInt8, etc., see list above.</li> <li> Sizes [fixed on creation, no default]: number of pixels/voxels in each dimension. This quantity implicitly defines the image dimension.</li> <li> Origin [default is zero]: coordinates of the pixel/voxel with index (0,0,0) in physical units (i.e. mm).</li> <li> Spacing [default is one]: Distance between adjacent pixels/voxels in each dimension given in physical units.</li> <li> Direction matrix [default is identity]: mapping, rotation, between direction of the pixel/voxel axes and physical directions.</li> </ol> Initial pixel/voxel values are set to zero. End of explanation """ image_3D.SetOrigin((78.0, 76.0, 77.0)) image_3D.SetSpacing([0.5, 0.5, 3.0]) print(f"origin: {image_3D.GetOrigin()}") print(f"size: {image_3D.GetSize()}") print(f"spacing: {image_3D.GetSpacing()}") print(f"direction: {image_3D.GetDirection()}\n") image_3D["origin"] = (2.0, 4.0, 8.0) image_3D["spacing"] = [0.25, 0.25, 5.0] print(f'origin: {image_3D["origin"]}') print(f"size: {image_3D.GetSize()}") print(f'spacing: {image_3D["spacing"]}') print(f'direction: {image_3D["direction"]}') """ Explanation: Basic Image Attributes You can change the image origin, spacing and direction using function calls. 
Making such changes to an image already containing data should be done cautiously. You can also use the dictionary like bracket operator to make these changes, with the keywords 'origin', 'spacing', 'direction'. End of explanation """ print(image_3D.GetDimension()) print(image_3D.GetWidth()) print(image_3D.GetHeight()) print(image_3D.GetDepth()) """ Explanation: Image dimension queries: End of explanation """ print(image_2D.GetSize()) print(image_2D.GetDepth()) """ Explanation: What is the depth of a 2D image? End of explanation """ print(image_3D.GetPixelIDValue()) print(image_3D.GetPixelIDTypeAsString()) print(image_3D.GetNumberOfComponentsPerPixel()) """ Explanation: Pixel/voxel type queries: End of explanation """ print(image_RGB.GetDimension()) print(image_RGB.GetSize()) print(image_RGB.GetNumberOfComponentsPerPixel()) """ Explanation: What is the dimension and size of a Vector image and its data? End of explanation """ help(image_3D.GetPixel) print(image_3D.GetPixel(0, 0, 0)) image_3D.SetPixel(0, 0, 0, 1) print(image_3D.GetPixel(0, 0, 0)) # This can also be done using Pythonic notation. print(image_3D[0, 0, 1]) image_3D[0, 0, 1] = 2 print(image_3D[0, 0, 1]) """ Explanation: Accessing Pixels and Slicing The Image class's member functions GetPixel and SetPixel provide an ITK-like interface for pixel access. 
End of explanation """ # Brute force sub-sampling logo_subsampled = logo[::2, ::2] # Get the sub-image containing the word Simple simple = logo[0:115, :] # Get the sub-image containing the word Simple and flip it simple_flipped = logo[115:0:-1, :] n = 4 plt.subplot(n, 1, 1) plt.imshow(sitk.GetArrayViewFromImage(logo)) plt.axis("off") plt.subplot(n, 1, 2) plt.imshow(sitk.GetArrayViewFromImage(logo_subsampled)) plt.axis("off") plt.subplot(n, 1, 3) plt.imshow(sitk.GetArrayViewFromImage(simple)) plt.axis("off") plt.subplot(n, 1, 4) plt.imshow(sitk.GetArrayViewFromImage(simple_flipped)) plt.axis("off"); """ Explanation: Slicing of SimpleITK images returns a copy of the image data. This is similar to slicing Python lists and differs from the "view" returned by slicing numpy arrays. End of explanation """ # Version 0: get the numpy array and assign the value via broadcast - later on you will need to construct # a new image from the array logo_pixels = sitk.GetArrayFromImage(logo) logo_pixels[0:10, 0:10] = [0, 255, 0] # Version 1: generates an error, the image slicing returns a new image and you cannot assign a value to an image # logo[0:10,0:10] = [255,0,0] # Version 2: image slicing returns a new image, so all assignments here will not have any effect on the original # 'logo' image logo_subimage = logo[0:10, 0:10] for x in range(0, 10): for y in range(0, 10): logo_subimage[x, y] = [255, 0, 0] # Version 3: modify the original image, iterate and assign a value to each pixel # for x in range(0,10): # for y in range(0,10): # logo[x,y] = [255,0,0] plt.subplot(2, 1, 1) plt.imshow(sitk.GetArrayViewFromImage(logo)) plt.axis("off") plt.subplot(2, 1, 2) plt.imshow(logo_pixels) plt.axis("off"); """ Explanation: Draw a square on top of the logo image: After running this cell, uncomment "Version 3" and see its effect. 
End of explanation """ logo = sitk.ReadImage(fdata("SimpleITK.jpg")) sz_x = 10 sz_y = 10 color_channels = [ sitk.Image([sz_x, sz_y], sitk.sitkUInt8), sitk.Image([sz_x, sz_y], sitk.sitkUInt8) + 255, sitk.Image([sz_x, sz_y], sitk.sitkUInt8), ] color_image = sitk.Compose(color_channels) color_image.SetSpacing([0.5, 0.5]) print(logo.GetSpacing()) print(color_image.GetSpacing()) # Set sub image using the Paste function logo = sitk.Paste( destinationImage=logo, sourceImage=color_image, sourceSize=color_image.GetSize(), sourceIndex=[0, 0], destinationIndex=[0, 0], ) # Set sub image using slicing. logo[20 : 20 + sz_x, 0:sz_y] = color_image sitk.Show(logo) """ Explanation: We can also paste one image into the other, either using the PasteImageFilter with its procedural interface or using a more Pythonic approach with image slicing. Note that for these operations SimpleITK treats the images as arrays of pixels and not as spatial objects. In the example below the fact that the images have different spacings is ignored. End of explanation """ z_slice = image_3D.GetDepth() // 2 result1 = image_3D[..., z_slice] result2 = image_3D[:, :, z_slice] # Check whether the two slices are equivalent, same pixel content and same origin, spacing, direction cosine. # Uncomment the following line to see what happens if the slices do not have the same origin. # result1['origin'] = [o+1.0 for o in result1['origin']] try: if np.all(sitk.GetArrayViewFromImage(result1 - result2) == 0): print("Slices equivalent.") else: print("Slices not equivalent (intensity differences).") except Exception: print("Slices not equivalent (physical differences).") """ Explanation: Finally, SimpleITK images also support the usage of ellipsis. Below we use both available approaches to obtain a slice. 
End of explanation """ nda = sitk.GetArrayFromImage(image_3D) print(image_3D.GetSize()) print(nda.shape) nda = sitk.GetArrayFromImage(image_RGB) print(image_RGB.GetSize()) print(nda.shape) gabor_image = sitk.GaborSource(size=[64, 64], frequency=0.03) # Getting a numpy array view on the image data doesn't copy the data nda_view = sitk.GetArrayViewFromImage(gabor_image) plt.imshow(nda_view, cmap=plt.cm.Greys_r) plt.axis("off") # Trying to assign a value to the array view will throw an exception nda_view[0, 0] = 255 """ Explanation: Conversion between numpy and SimpleITK SimpleITK and numpy indexing access is in opposite order! SimpleITK: image[x,y,z]<br> numpy: image_numpy_array[z,y,x] From SimpleITK to numpy We have two options for converting from SimpleITK to numpy: * GetArrayFromImage(): returns a copy of the image data. You can then freely modify the data as it has no effect on the original SimpleITK image. * GetArrayViewFromImage(): returns a view on the image data which is useful for display in a memory efficient manner. You cannot modify the data and the view will be invalid if the original SimpleITK image is deleted. End of explanation """ nda = np.zeros((10, 20, 3)) # if this is supposed to be a 3D gray scale image [x=3, y=20, z=10] img = sitk.GetImageFromArray(nda) print(img.GetSize()) # if this is supposed to be a 2D color image [x=20,y=10] img = sitk.GetImageFromArray(nda, isVector=True) print(img.GetSize()) """ Explanation: From numpy to SimpleITK Remember to to set the image's origin, spacing, and possibly direction cosine matrix. The default values may not match the physical dimensions of your image. End of explanation """ def my_algorithm(image_as_numpy_array): # res is the image result of your algorithm, has the same grid size as the original image res = image_as_numpy_array return res # Starting with SimpleITK img = sitk.ReadImage(fdata("training_001_mr_T1.mha")) # Custom Python code working on a numpy array. 
npa_res = my_algorithm(sitk.GetArrayFromImage(img)) # Converting back to SimpleITK (assumes we didn't move the image in space as we copy the information from the original) res_img = sitk.GetImageFromArray(npa_res) res_img.CopyInformation(img) # Continuing to work with SimpleITK images res_img - img """ Explanation: There and back again The following code cell illustrates a situation where your code is a combination of SimpleITK methods and custom Python code which works with intensity values or labels outside of SimpleITK. This is a reasonable approach when you implement an algorithm in Python and don't care about the physical spacing of things (you are actually assuming the volume is isotropic). End of explanation """ img1 = sitk.Image(24, 24, sitk.sitkUInt8) img1[0, 0] = 0 img2 = sitk.Image(img1.GetSize(), sitk.sitkUInt8) img2.SetDirection([0, 1, 0.5, 0.5]) img2.SetSpacing([0.5, 0.8]) img2.SetOrigin([0.000001, 0.000001]) img2[0, 0] = 255 img3 = img1 + img2 print(img3[0, 0]) """ Explanation: Image operations SimpleITK supports basic arithmetic operations between images, <b>taking into account their physical space</b>. Repeatedly run this cell. Fix the error (comment out the SetDirection, then SetSpacing). Why doesn't the SetOrigin line cause a problem? How close do two physical attributes need to be in order to be considered equivalent? End of explanation """ img = sitk.ReadImage(fdata("SimpleITK.jpg")) print(img.GetPixelIDTypeAsString()) # write as PNG and BMP sitk.WriteImage(img, os.path.join(OUTPUT_DIR, "SimpleITK.png")) sitk.WriteImage(img, os.path.join(OUTPUT_DIR, "SimpleITK.bmp")) """ Explanation: Reading and Writing SimpleITK can read and write images stored in a single file, or a set of files (e.g. DICOM series). Images stored in the DICOM format have a meta-data dictionary associated with them, which is populated with the DICOM tags. 
When a DICOM series is read as a single image, the meta-data information is not available since DICOM tags are specific to each file. If you need the meta-data, you have three options: Using the object oriented interface's ImageSeriesReader class, configure it to load the tags using the MetaDataDictionaryArrayUpdateOn method and possibly the LoadPrivateTagsOn method if you need the private tags. Once the series is read you can access the meta-data from the series reader using the GetMetaDataKeys, HasMetaDataKey, and GetMetaData. Using the object oriented interface's ImageFileReader, set a specific slice's file name and only read it's meta-data using the ReadImageInformation method which only reads the meta-data but not the bulk pixel information. Once the meta-data is read you can access it from the file reader using the GetMetaDataKeys, HasMetaDataKey, and GetMetaData. Using the object oriented interface's ImageFileReader, set a specific slice's file name and read it. Or using the procedural interface's, ReadImage function, read a specific file. You can then access the meta-data directly from the Image using the GetMetaDataKeys, HasMetaDataKey, and GetMetaData. In the following cell, we read an image in JPEG format, and write it as PNG and BMP. File formats are deduced from the file extension. Appropriate pixel type is also set - you can override this and force a pixel type of your choice. End of explanation """ # Several pixel types, some make sense in this case (vector types) and some are just show # that the user's choice will force the pixel type even when it doesn't make sense # (e.g. sitkVectorUInt16 or sitkUInt8). 
pixel_types = {
    "sitkUInt8": sitk.sitkUInt8,
    "sitkUInt16": sitk.sitkUInt16,
    "sitkFloat64": sitk.sitkFloat64,
    "sitkVectorUInt8": sitk.sitkVectorUInt8,
    "sitkVectorUInt16": sitk.sitkVectorUInt16,
    "sitkVectorFloat64": sitk.sitkVectorFloat64,
}


def pixel_type_dropdown_callback(pixel_type, pixel_types_dict):
    """Read the sample JPEG forcing the user-selected pixel type and display it.

    pixel_type: str key chosen in the ipywidgets dropdown.
    pixel_types_dict: mapping from display name to a SimpleITK pixel ID enum;
        the chosen value overrides the pixel type deduced from the file.
    """
    # specify the file location and the pixel type we want
    img = sitk.ReadImage(fdata("SimpleITK.jpg"), pixel_types_dict[pixel_type])
    print(img.GetPixelIDTypeAsString())
    # Print the first pixel's value to show the effect of the forced pixel type.
    print(img[0, 0])
    plt.imshow(sitk.GetArrayViewFromImage(img))
    plt.axis("off")


# Wire the callback to a dropdown; pixel_types_dict is fixed so only the
# pixel type choice varies interactively.
interact(
    pixel_type_dropdown_callback,
    pixel_type=list(pixel_types.keys()),
    pixel_types_dict=fixed(pixel_types),
);
"""
Explanation: Read an image in JPEG format and cast the pixel type according to user selection.
End of explanation
"""
data_directory = os.path.dirname(fdata("CIRS057A_MR_CT_DICOM/readme.txt"))
series_ID = "1.2.840.113619.2.290.3.3233817346.783.1399004564.515"

# Get the list of files belonging to a specific series ID.
reader = sitk.ImageSeriesReader()
# Use the functional interface to read the image series.
original_image = sitk.ReadImage(
    reader.GetGDCMSeriesFileNames(data_directory, series_ID)
)

# Write the image.
output_file_name_3D = os.path.join(OUTPUT_DIR, "3DImage.mha")
sitk.WriteImage(original_image, output_file_name_3D)

# Read it back again.
written_image = sitk.ReadImage(output_file_name_3D)

# Check that the original and written image are the same. 
statistics_image_filter = sitk.StatisticsImageFilter()
statistics_image_filter.Execute(original_image - written_image)

# Check that the original and written files are the same: a lossless round trip
# means the per-voxel difference image is identically zero.
print(
    f"Max, Min differences are : {statistics_image_filter.GetMaximum()}, {statistics_image_filter.GetMinimum()}"
)
"""
Explanation: Read a DICOM series and write it as a single mha file
End of explanation
"""
sitk.WriteImage(
    sitk.Cast(sitk.RescaleIntensity(written_image), sitk.sitkUInt8),
    [
        os.path.join(OUTPUT_DIR, f"slice{i:03d}.jpg")
        for i in range(written_image.GetSize()[2])
    ],
)
"""
Explanation: Write an image series as JPEG. The WriteImage function receives a volume and a list of images names and writes the volume according to the z axis. For a displayable result we need to rescale the image intensities (default is [0,255]) since the JPEG format requires a cast to the UInt8 pixel type.
End of explanation
"""
data_directory = os.path.dirname(fdata("CIRS057A_MR_CT_DICOM/readme.txt"))
# Global variable 'selected_series' is updated by the interact function
selected_series = ""
file_reader = sitk.ImageFileReader()


def DICOM_series_dropdown_callback(series_to_load, series_dictionary):
    """Print key DICOM tags for the first file of the chosen series and record
    the choice in the module-level 'selected_series' variable.

    series_to_load: series instance UID chosen in the dropdown.
    series_dictionary: maps series UID to the ordered list of file names.
    """
    global selected_series
    # Print some information about the series from the meta-data dictionary
    # DICOM standard part 6, Data Dictionary: http://medical.nema.org/medical/dicom/current/output/pdf/part06.pdf
    file_reader.SetFileName(series_dictionary[series_to_load][0])
    file_reader.ReadImageInformation()
    tags_to_print = {
        "0010|0010": "Patient name: ",
        "0008|0060": "Modality: ",
        "0008|0021": "Series date: ",
        "0008|0080": "Institution name: ",
        "0008|1050": "Performing physician's name: ",
    }
    for tag in tags_to_print:
        # Query for the tag explicitly instead of using a bare 'except:', which
        # also swallowed unrelated errors (including KeyboardInterrupt). Tags
        # absent from this file's meta-data dictionary are simply skipped, as before.
        if file_reader.HasMetaDataKey(tag):
            print(tags_to_print[tag] + file_reader.GetMetaData(tag))
    selected_series = series_to_load


# Directory contains multiple DICOM studies/series, store
# in dictionary with key being the series ID
reader = sitk.ImageSeriesReader()
series_file_names = {} series_IDs = reader.GetGDCMSeriesIDs(data_directory) # Check that we have at least one series if series_IDs: for series in series_IDs: series_file_names[series] = reader.GetGDCMSeriesFileNames( data_directory, series ) interact( DICOM_series_dropdown_callback, series_to_load=list(series_IDs), series_dictionary=fixed(series_file_names), ) else: print("Data directory does not contain any DICOM series.") reader.SetFileNames(series_file_names[selected_series]) img = reader.Execute() # Display the image slice from the middle of the stack, z axis z = int(img.GetDepth() / 2) plt.imshow(sitk.GetArrayViewFromImage(img)[z, :, :], cmap=plt.cm.Greys_r) plt.axis("off"); """ Explanation: Select a specific DICOM series from a directory and only then load user selection. End of explanation """ xrays = [sitk.ReadImage(fdata("photo.dcm")), sitk.ReadImage(fdata("cxr.dcm"))] # We can access the image's metadata via the GetMetaData method or # via the bracket operator, the latter is more concise. for img in xrays: print(f'Image Modality: {img.GetMetaData("0008|0060")}') print(f"Number of channels: {img.GetNumberOfComponentsPerPixel()}") print(f'Photomertic Interpretation: {img["0028|0004"]}') # Display the image using Fiji which expects the channels to be in the RGB color space sitk.Show(img) """ Explanation: DICOM photometric interpretation Generally speaking, SimpleITK represents color images as multi-channel images independent of a color space. It is up to you to interpret the channels correctly based on additional color space knowledge prior to using them for display or any other purpose. The following cells illustrate reading and interpretation of interesting images in DICOM format. The first is a photograph of an X-ray on a light box (yes, there are some strange things in the wild). The second is a digital X-ray. 
While both of these are chest X-rays they differ in image modality (0008|0060) and in Photometric Interpretation (0028|0004), color space in DICOM speak. Things to note: 1. When using SimpleITK to read a color DICOM image, the channel values will be transformed to the RGB color space. 2. When using SimpleITK to read a scalar image, it is assumed that the lowest intensity value is black and highest white. If the photometric interpretation tag is MONOCHROME2 (lowest value displayed as black) nothing is done. If it is MONOCHROME1 (lowest value displayed as white), the pixel values are inverted. End of explanation """ def srgb2gray(image): # Convert sRGB image to gray scale and rescale results to [0,255] channels = [ sitk.VectorIndexSelectionCast(image, i, sitk.sitkFloat32) for i in range(image.GetNumberOfComponentsPerPixel()) ] # linear mapping I = 1 / 255.0 * (0.2126 * channels[0] + 0.7152 * channels[1] + 0.0722 * channels[2]) # nonlinear gamma correction I = ( I * sitk.Cast(I <= 0.0031308, sitk.sitkFloat32) * 12.92 + I ** (1 / 2.4) * sitk.Cast(I > 0.0031308, sitk.sitkFloat32) * 1.055 - 0.055 ) return sitk.Cast(sitk.RescaleIntensity(I), sitk.sitkUInt8) sitk.Show(srgb2gray(xrays[0])) """ Explanation: The first, is a color sRGB image while an x-ray should be a single channel gray scale image. We will convert sRGB to gray scale. 
End of explanation """ file_reader = sitk.ImageFileReader() # Get a tuple listing all registered ImageIOs image_ios_tuple = file_reader.GetRegisteredImageIOs() print("The supported image IOs are: " + str(image_ios_tuple)) # Optionally, just print the reader and see which ImageIOs are registered print("\n", file_reader) # Specify the JPEGImageIO and read file file_reader.SetImageIO("JPEGImageIO") file_reader.SetFileName(fdata("SimpleITK.jpg")) logo = file_reader.Execute() # Unfortunately, now reading a non JPEG image will fail try: file_reader.SetFileName(fdata("cthead1.png")) ct_head = file_reader.Execute() except RuntimeError: print("Got a RuntimeError exception.") # We can reset the file reader to its default behaviour so that it automatically # selects the ImageIO file_reader.SetImageIO("") ct_head = file_reader.Execute() """ Explanation: Finer control The ImageFileReader's interface provides finer control for reading, allowing us to require the use of a specific IO and allowing us to stream parts of an image to memory without reading the whole image (supported by a subset of the ImageIO components). Selecting a Specific Image IO SimpleITK relies on the registered ImageIOs to indicate whether they can read a file and then perform the reading. This is done automatically, going over the set of ImageIOs and inquiring whether they can read the given file. The first one that can is selected. If multiple ImageIOs can read a specific format, we do not know which one was used for the task (e.g. TIFFImageIO and LSMImageIO, which is derived from it, can both read tif files). In some cases you may want to use a specific IO, possibly one that reads the file faster, or supports a more complete feature set associated with the file format. The next cell shows how to find out which ImageIOs are registered and specify the one we want. 
End of explanation """ file_reader = sitk.ImageFileReader() file_reader.SetFileName(fdata("vm_head_rgb.mha")) file_reader.ReadImageInformation() image_size = file_reader.GetSize() start_index, extract_size = zip( *[(int(1.0 / 3.0 * sz), int(1.0 / 3.0 * sz)) for sz in file_reader.GetSize()] ) file_reader.SetExtractIndex(start_index) file_reader.SetExtractSize(extract_size) sitk.Show(file_reader.Execute()) """ Explanation: Streaming Image IO Some of the ImageIOs supported in SimpleITK allow you to stream in sub-regions of an image without the need to read the whole image into memory. This is very useful when you are memory constrained (either your images are large or your memory is limited). The ImageIOs that support streaming include HDF5ImageIO, VTKImageIO, NiftiImageIO, MetaImageIO... The next cell shows how to read in a sub/cropped image from a larger image. We read the central 1/3 portion of the image [1/3,2/3] of the original image. End of explanation """ def streaming_subtract(image1_file_name, image2_file_name, parts): """ Subtract image1 from image2 using 'parts' number of sub-regions. 
""" file_reader = sitk.ImageFileReader() file_reader.SetFileName(image1_file_name) file_reader.ReadImageInformation() image_size = file_reader.GetSize() # Create the result image, initially empty result_img = sitk.Image( file_reader.GetSize(), file_reader.GetPixelID(), file_reader.GetNumberOfComponents(), ) result_img.SetSpacing(file_reader.GetSpacing()) result_img.SetOrigin(file_reader.GetOrigin()) result_img.SetDirection(file_reader.GetDirection()) extract_size = list(file_reader.GetSize()) extract_size[-1] = extract_size[-1] // parts current_index = [0] * file_reader.GetDimension() for i in range(parts): if i == ( parts - 1 ): # last region may be smaller than the standard extract region extract_size[-1] = image_size[-1] - current_index[-1] file_reader.SetFileName(image1_file_name) file_reader.SetExtractIndex(current_index) file_reader.SetExtractSize(extract_size) sub_image1 = file_reader.Execute() file_reader.SetFileName(image2_file_name) file_reader.SetExtractIndex(current_index) file_reader.SetExtractSize(extract_size) sub_image2 = file_reader.Execute() # Paste the result of subtracting the two subregions into their location in the result_img result_img = sitk.Paste( result_img, sub_image1 - sub_image2, extract_size, [0] * file_reader.GetDimension(), current_index, ) current_index[-1] += extract_size[-1] return result_img # If you have the patience and RAM you can try this with the vm_head_rgb.mha image. image1_file_name = fdata("fib_sem_bacillus_subtilis.mha") image2_file_name = fdata("fib_sem_bacillus_subtilis.mha") """ Explanation: The next cells show how to subtract two large images from each other with a smaller memory footprint than the direct approach, though the code is much more complex and slower than the direct approach: sitk.ReadImage(image1_file_name) - sitk.ReadImage(image2_file_name) Note: The code assume that the two images occupy the same spatial region (origin, spacing, direction cosine matrix). 
End of explanation """ result_img = streaming_subtract(image1_file_name, image2_file_name, parts=5) del result_img result_img = sitk.ReadImage(image1_file_name) - sitk.ReadImage(image2_file_name) del result_img """ Explanation: A simple way of seeing your system's memory usage is to open the appropriate monitoring program: (Windows) Resource Monitor; (Linux) top; (OS X) Activity Monitor. This will give you a rough idea of the memory used by the streaming vs. non streaming approaches. End of explanation """
bwbadger/mifid2-rts
rts/Using sample trades in an SI calculation.ipynb
bsd-3-clause
# First we need to import the libraries we'll be needing import rts2_annex3 import pandas as pd import random random.seed() # Get the root of the RTS 2 Annex III taxonomy root = rts2_annex3.class_root # Get the Asset Class we would like to generate trades for asset_class = root.asset_class_by_name("Credit Derivatives") # Ask the Asset class to generate some sample trade sample_trades = asset_class.make_test_samples(number=500) print("Generated {count} trades. here is one example:\n".format(count=len(sample_trades))) print(vars(random.choice(sample_trades))) """ Explanation: SI Calculation: an example using generated sample trades Here we will generate sample trades from the RTS 2 Annex III taxonomy. Each sample trade is then enriched with the information needed run an SI calculation. Once the trade data is assembled the the data normally provided by the regulator is synthesised. Lastly, the SI calculations are run The SI calculation includes a number of tests. See Article 15 (page 39) of Brussels, 25.4.2016 C(2016) 2398 final for the three derivatives tests: a, b and c. Generate some trade data The first step is to use the RTS 2 Annex III taxonomy to generate some sample trades. End of explanation """ # Typically trades invole two parties, the bank and a counterparty (the client). # For the SI calculation we just need the bank LEI. for sample_trade in sample_trades: sample_trade.lei = 'Our_bank_LEI' # Print the LEI from one of the trades (they're all the same!) print(random.choice(sample_trades).lei) """ Explanation: LEIs In a real firm with real trades we would need to know the LEI (Legal Entity Identifier - ISO 17442) of the legal entity which did each trade because SI status is reported distinctly for each legal entity, identified by an LEI. Firms may do trades within a single legal entity, perhaps to move risk from one trading desk to another. These are called intra-entity trades and must be filtered out before the SI calculation. 
For this example we'll say that all the trades we generated are inter-entity trades (i.e. trades between distinct legal entities), so we count them all. In this example we'll use just one LEI, and not even a valid one, but it will suffice for the example. End of explanation """ # We give each sample trade a trade date in a 30 day range of dates # and an ISO week number (c.f. https://en.wikipedia.org/wiki/ISO_week_date) import datetime sample_dates = [] today = datetime.date.today() for day_number in range(-30, 0): a_date = today + datetime.timedelta(day_number) if a_date.weekday() < 6: sample_dates.append(a_date) for sample_trade in sample_trades: selected_date = random.choice(sample_dates) sample_trade.trade_date = selected_date sample_trade.trade_date_week = selected_date.isocalendar()[1] # Print the one of the modified sample trades a_trade = random.choice(sample_trades) print(a_trade.trade_date) print(a_trade.trade_date_week) """ Explanation: Trade Date The SI calculation includes checks for frequency, the number of trades done in a single week. To work that out we need a trade date for each trade. Here we'll just use a few dates and add these to our sample trades. End of explanation """ # We define our MICs. A MIC value is always 4 charcters in length. The values used # here are made-up nonsense, but good enough for an illustration eea_mics = ['EEA1', 'EEA2', 'EEA3'] non_eea_mics = ['OFF1', 'OFF2', 'OFF3', 'OFF4'] all_mics = eea_mics + non_eea_mics # Add a MIC to each sample trade for sample_trade in sample_trades: sample_trade.mic = random.choice(all_mics) # Print the one of the modified sample trades a_trade = random.choice(sample_trades) print(a_trade.mic) """ Explanation: MIC A MIC (Market Identifier Code - ISO 10383) is an ID for a trading venue, for example a stock exchange. The regulator is expected to provide a list of MIC values which identify venues which are recognised for the purposes of the SI calculation. Trades which are done on vs. 
off recognised venues are counted differently. End of explanation """ # Own Account is a boolean; either this is a trade which the regulator views as being # on the bank's own account, or not. I use a random boolean with a probability. own_account_probability = 0.25 for sample_trade in sample_trades: sample_trade.own_account = random.random() < own_account_probability # Print the one of the modified sample trades a_trade = random.choice(sample_trades) print(a_trade.own_account) """ Explanation: Own Account We need to know if a trade was done on the firms own account. Such trades are counted differently. End of explanation """ # Client Order is also simply a boolean. Either this is a trade which was done # in response to a client order, or not. I use a random boolean. client_order_probability = 0.5 for sample_trade in sample_trades: sample_trade.client_order = random.random() < client_order_probability # Print the one of the modified sample trades a_trade = random.choice(sample_trades) print(a_trade.client_order) """ Explanation: Client Order We need to know if a trade was done in response to a client order. Such trades are counted differently. End of explanation """ # Add a random-ish Euro Notional amount of n million EUR to each trade notional_amounts = [x * 1000000 for x in [1, 1, 1, 2, 2, 5, 10, 25]] for sample_trade in sample_trades: sample_trade.eur_notional = random.choice(notional_amounts) # Print the one of the modified sample trades a_trade = random.choice(sample_trades) print(a_trade.eur_notional) """ Explanation: EUR Notional Another measure used by the SI calculation is the EUR notional value of each trade. Here we assign a notional value to each trade. 
class SIReport(object):
    """Aggregate trades for one LEI and report the RTS 2 Annex III
    sub classes for which that LEI is a Systematic Internaliser."""

    @classmethod
    def for_trades(cls, trades):
        """Alternate constructor: build a report pre-loaded with ``trades``."""
        report = cls()
        report.add_trades(trades)
        return report

    def __init__(self):
        self.trades = []
        self.sub_classes = {}
        # Lazily computed span of ISO weeks; reset whenever trades are added.
        self._number_of_weeks = None

    def add_trades(self, trades):
        """Add ``trades``, routing each one into its RTS 2 sub-class bucket.

        Sub classes are keyed by the JSON form of the trade's RTS 2
        classification; a bucket is created on first sight of a key.
        """
        self.trades.extend(trades)
        for trade in trades:
            key = trade.rts2_classification.as_json()
            if key not in self.sub_classes:
                self.sub_classes[key] = RTS2SubClass(self, trade)
            self.sub_classes[key].add_trade(trade)
        self._number_of_weeks = None

    @property
    def number_of_weeks(self):
        """Inclusive span of ISO week numbers covered by the trades.

        NOTE(review): assumes all trades fall within one calendar year;
        ISO week numbers wrap at year end, which would break this span --
        TODO confirm with the date-generation step.
        """
        if self._number_of_weeks is None:
            weeks = [trade.trade_date_week for trade in self.trades]
            self._number_of_weeks = max(weeks) - min(weeks) + 1
        return self._number_of_weeks

    def si_sub_classes(self):
        """Return the sub classes whose SI test came back positive."""
        return [sub for sub in self.sub_classes.values() if sub.si_status()]

    def report(self):
        """Render the SI result for this LEI as an indented JSON document."""
        positives = self.si_sub_classes()
        summary = (
            'This LEI is an SI for {si_count} of {all_count} '
            'sub classes traded over {weeks} weeks.'.format(
                si_count=len(positives),
                all_count=len(self.sub_classes),
                weeks=self.number_of_weeks))
        return json.dumps([summary] + [sub.report() for sub in positives],
                          indent=4)
sample_trade): self.si_report = si_report self.sample_trade = sample_trade self.is_liquid = random.random() < 0.5 self.trades = [] self._aggregations = None @property def aggregations(self): # I keep all the computed results in one object so I can drop the cache # if a trade is added if self._aggregations is None: self._aggregations = Aggregations(sub_class=self) return self._aggregations def add_trade(self, trade): self.trades.append(trade) self._aggregations = None def si_status(self): """ This is the SI Calculation. It is applied distinctly to each sub class. It's quite simple once everything is aggregated. Note that these are the rules for derivatives trades only. """ agg = self.aggregations if self.is_liquid \ and agg.trade_count >= (0.025 * agg.eu_trade_count) \ and agg.avg_weekly_trades >= 1: return "SI - (a) Liquid instrument test" if not self.is_liquid \ and agg.avg_weekly_trades >= 1: return "SI - (b) Non-liquid instrument test" if agg.notional_sum >= (0.25 * agg.lei_notional_sum) \ or agg.notional_sum >= (0.01 * agg.eu_notional_sum): return "SI - (c) Notional size test" return None def report(self): report_list = ['Status: {status}.'.format(status=self.si_status())] agg_dict = vars(self.aggregations).copy() del agg_dict['sub_class'] del agg_dict['si_trades'] report_list.append(agg_dict) report_list.append(self.sample_trade.rts2_classification.classification_dict()) return report_list class Aggregations(object): """ Each instance of Aggregations represents the subset of the trades for a sub class which are OTC client orders on the own account of the LEI. The SI calculation tests are with respect to this subset of the trades. 
""" def __init__(self, sub_class): self.sub_class = sub_class # Build the aggregations for this sub class self.si_trades = [ trade for trade in self.sub_class.trades if (not trade.mic in eea_mics) # OTC and trade.own_account # Traded on own account and trade.client_order] # in response to a client order self.trade_count = len(self.si_trades) self.notional_sum = sum([abs(trade.eur_notional) for trade in self.si_trades]) self.avg_weekly_trades = self.trade_count / self.sub_class.si_report.number_of_weeks # Now I synthesise the EU figures which should really come from the regulator self.eu_trade_count = self.trade_count * 40 + random.choice([ self.trade_count * -1, self.trade_count]) if self.notional_sum: self.eu_notional_sum = self.notional_sum * 100 + random.choice([ self.notional_sum * -1, self.notional_sum]) else: self.eu_notional_sum = 1 # I keep this sub class sum here so it gets flushed if trades added self.lei_notional_sum = sum( [abs(trade.eur_notional) for trade in self.sub_class.trades]) """ Explanation: Do the SI calculation The "calculation" is really a set of filters 3 filters (a, b & c as shown below), which might identify an firm as being an SI for a particular RTS 2 subclass. The filters all focus on the count and notional sums of trades which are OTC (not traded on EEA recognised venue), own account (traded using the banks money) and in response to a client order. Here we'll call this subset of trades SI-trades. Tests a & b also ask if a particulat RTS 2 sub class is liquid. Whether an instrument is liquid or not is determined by the regulator, and the regulator must publish this, together with the total trade count and total notional, for each sub class. a. If the RTS 2 Annex III sub class is liquid - and the count of SI-trades >= 2.5% of eu_rts2_trade_count - and average weekly number of SI-trades >= 1 b. If the RTS 2 Annex III sub class is not liquid - and average weekly number of SI-trades >= 1 c. 
def eu_data_for_sub_class(sub_class):
    """Extract the regulator-style EU reference data held on ``sub_class``.

    Returns a dict with the sub class's JSON classification (used as the
    table index below), its liquidity flag, and the synthesised EU-wide
    trade count and notional sum.
    """
    aggregations = sub_class.aggregations
    return {
        "rts2_classification": sub_class.sample_trade.rts2_classification.as_json(),
        "is_liquid": sub_class.is_liquid,
        "eu_trade_count": aggregations.eu_trade_count,
        "eu_notional_sum": aggregations.eu_notional_sum,
    }
def si_details_from_sample(sample_trade):
    """Flatten one sample trade into a plain dict of SI-relevant fields.

    The RTS 2 classification is stored as its JSON string so it can act
    as a groupby key in the Pandas tables below.
    """
    plain_fields = ("lei", "trade_date", "trade_date_week", "mic",
                    "own_account", "client_order", "eur_notional")
    details = {name: getattr(sample_trade, name) for name in plain_fields}
    details["rts2_classification"] = sample_trade.rts2_classification.as_json()
    return details
End of explanation """ # Filter a fa = sub_classes3.copy() fa[(fa.is_liquid) & (fa.trade_count >= (fa.eu_trade_count * 0.025)) & (fa.avg_weekly_trades >= 1)] # Filter b fb = sub_classes3.copy() fb[(~fb.is_liquid) & (fb.avg_weekly_trades >= 1)] # Filter c fc = sub_classes3.copy() fc[ (fc.notional_sum >= (fc.lei_notional_sum * 0.25)) | (fc.notional_sum >= (fc.eu_notional_sum * 0.01))] """ Explanation: This is the calculation End of explanation """
vlad17/vlad17.github.io
assets/2019-10-20-prngs.ipynb
apache-2.0
def something_random(_):
    """Draw one sample from the standard normal distribution.

    The argument is ignored; it exists only so the function can be used
    with ``Pool.map`` over a range of task indices.  ``randn()`` is the
    documented convenience alias for ``standard_normal()``, so this draws
    the same value from the same global stream.
    """
    return np.random.standard_normal()
def simple_pairwise(a, b):
    """Chi-square uniformity test on the joint distribution of two bit arrays.

    Parameters
    ----------
    a, b : array-like of {0, 1}, equal length
        Two bit streams to compare pairwise.

    Returns
    -------
    scipy.stats result (statistic, pvalue)
        Chi-square test against the hypothesis that the four joint
        outcomes 00, 10, 01, 11 are equally likely.
    """
    # Encode each pair (a, b) as one code in {0, 1, 2, 3} and count.
    # minlength=4 guarantees a full 4-cell contingency table even when some
    # combination never occurs (a plain bincount would silently drop it and
    # change the test's degrees of freedom).
    table = np.bincount(a + b * 2, minlength=4)
    return chisquare(table)
def rbytes(n):
    """Return ``n`` pseudo-random bytes from the global RNG as a uint8 array."""
    raw = np.random.bytes(n)
    return np.frombuffer(raw, dtype=np.uint8)
ES-DOC/esdoc-jupyterhub
notebooks/test-institute-2/cmip6/models/sandbox-3/landice.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'test-institute-2', 'sandbox-3', 'landice') """ Explanation: ES-DOC CMIP6 Model Properties - Landice MIP Era: CMIP6 Institute: TEST-INSTITUTE-2 Source ID: SANDBOX-3 Topic: Landice Sub-Topics: Glaciers, Ice. Properties: 30 (21 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:45 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Software Properties 3. Grid 4. Glaciers 5. Ice 6. Ice --&gt; Mass Balance 7. Ice --&gt; Mass Balance --&gt; Basal 8. Ice --&gt; Mass Balance --&gt; Frontal 9. Ice --&gt; Dynamics 1. Key Properties Land ice key properties 1.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of land surface model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. 
Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of land surface model code End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.ice_albedo') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "prescribed" # "function of ice age" # "function of ice density" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Ice Albedo Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Specify how ice albedo is modelled End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.4. Atmospheric Coupling Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Which variables are passed between the atmosphere and ice (e.g. orography, ice mass) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.5. Oceanic Coupling Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Which variables are passed between the ocean and ice End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "ice velocity" # "ice thickness" # "ice temperature" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.6. 
Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Which variables are prognostically calculated in the ice model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Software Properties Software properties of land ice code 2.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3. Grid Land ice grid 3.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of the grid in the land ice scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.landice.grid.adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 3.2. Adaptive Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is an adative grid being used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.grid.base_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.3. Base Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The base resolution (in metres), before any adaption End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.grid.resolution_limit') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.4. Resolution Limit Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If an adaptive grid is being used, what is the limit of the resolution (in metres) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.grid.projection') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.5. Projection Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The projection of the land ice grid (e.g. albers_equal_area) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.glaciers.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Glaciers Land ice glaciers 4.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of glaciers in the land ice scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.landice.glaciers.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the treatment of glaciers, if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 4.3. Dynamic Areal Extent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Does the model include a dynamic glacial extent? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Ice Ice sheet and ice shelf 5.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of the ice sheet and ice shelf in the land ice scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.grounding_line_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "grounding line prescribed" # "flux prescribed (Schoof)" # "fixed grid size" # "moving grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 5.2. Grounding Line Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.ice_sheet') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.3. 
Ice Sheet Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are ice sheets simulated? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.ice_shelf') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.4. Ice Shelf Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are ice shelves simulated? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Ice --&gt; Mass Balance Description of the surface mass balance treatment 6.1. Surface Mass Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how and where the surface mass balance (SMB) is calulated. Include the temporal coupling frequeny from the atmosphere, whether or not a seperate SMB model is used, and if so details of this model, such as its resolution End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Ice --&gt; Mass Balance --&gt; Basal Description of basal melting 7.1. Bedrock Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the implementation of basal melting over bedrock End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. 
Ocean Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the implementation of basal melting over the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Ice --&gt; Mass Balance --&gt; Frontal Description of claving/melting from the ice shelf front 8.1. Calving Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the implementation of calving from the front of the ice shelf End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Melting Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the implementation of melting from the front of the ice shelf End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.dynamics.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Ice --&gt; Dynamics ** 9.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General description if ice sheet and ice shelf dynamics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.dynamics.approximation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "SIA" # "SAA" # "full stokes" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9.2. Approximation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Approximation type used in modelling ice dynamics End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 9.3. Adaptive Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there an adaptive time scheme for the ice scheme? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.landice.ice.dynamics.timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 9.4. Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep (in seconds) of the ice scheme. If the timestep is adaptive, then state a representative timestep. End of explanation """
mne-tools/mne-tools.github.io
0.23/_downloads/b2637a9801fb152d611a08a816cc5583/sensor_regression.ipynb
bsd-3-clause
# Authors: Tal Linzen <linzen@nyu.edu> # Denis A. Engemann <denis.engemann@gmail.com> # Jona Sassenhagen <jona.sassenhagen@gmail.com> # # License: BSD (3-clause) import pandas as pd import mne from mne.stats import linear_regression, fdr_correction from mne.viz import plot_compare_evokeds from mne.datasets import kiloword # Load the data path = kiloword.data_path() + '/kword_metadata-epo.fif' epochs = mne.read_epochs(path) print(epochs.metadata.head()) """ Explanation: Analysing continuous features with binning and regression in sensor space Predict single trial activity from a continuous variable. A single-trial regression is performed in each sensor and timepoint individually, resulting in an :class:mne.Evoked object which contains the regression coefficient (beta value) for each combination of sensor and timepoint. This example shows the regression coefficient; the t and p values are also calculated automatically. Here, we repeat a few of the analyses from :footcite:DufauEtAl2015. This can be easily performed by accessing the metadata object, which contains word-level information about various psycholinguistically relevant features of the words for which we have EEG activity. For the general methodology, see e.g. :footcite:HaukEtAl2006. References .. footbibliography:: End of explanation """ name = "Concreteness" df = epochs.metadata df[name] = pd.cut(df[name], 11, labels=False) / 10 colors = {str(val): val for val in df[name].unique()} epochs.metadata = df.assign(Intercept=1) # Add an intercept for later evokeds = {val: epochs[name + " == " + val].average() for val in colors} plot_compare_evokeds(evokeds, colors=colors, split_legend=True, cmap=(name + " Percentile", "viridis")) """ Explanation: Psycholinguistically relevant word characteristics are continuous. I.e., concreteness or imaginability is a graded property. In the metadata, we have concreteness ratings on a 5-point scale. 
We can show the dependence of the EEG on concreteness by dividing the data into bins and plotting the mean activity per bin, color coded. End of explanation """ names = ["Intercept", name] res = linear_regression(epochs, epochs.metadata[names], names=names) for cond in names: res[cond].beta.plot_joint(title=cond, ts_args=dict(time_unit='s'), topomap_args=dict(time_unit='s')) """ Explanation: We observe that there appears to be a monotonic dependence of EEG on concreteness. We can also conduct a continuous analysis: single-trial level regression with concreteness as a continuous (although here, binned) feature. We can plot the resulting regression coefficient just like an Event-related Potential. End of explanation """ reject_H0, fdr_pvals = fdr_correction(res["Concreteness"].p_val.data) evoked = res["Concreteness"].beta evoked.plot_image(mask=reject_H0, time_unit='s') """ Explanation: Because the :func:~mne.stats.linear_regression function also estimates p values, we can -- after applying FDR correction for multiple comparisons -- also visualise the statistical significance of the regression of word concreteness. The :func:mne.viz.plot_evoked_image function takes a mask parameter. If we supply it with a boolean mask of the positions where we can reject the null hypothesis, points that are not significant will be shown transparently, and if desired, in a different colour palette and surrounded by dark contour lines. End of explanation """
shumway/srt_bootcamp
Mandelbrot_CPU_Example.ipynb
mit
import numpy as np
import bokeh.plotting as bk
bk.output_notebook()
from numba import jit
from timeit import default_timer as timer
from IPython.html.widgets import interact, interact_manual, fixed, FloatText
""" Explanation: CPU Acceleration of Mandelbrot Generation In this example we
use numba to accelerate the generation of the Mandelbrot set. The numba
package allows us to compile python bytecode directly to machine
instructions. It uses the LLVM compiler under the hood to compile optimized
native code on the fly. End of explanation """


@jit(nopython=True)
def mandel(x, y, max_iters):
    """
    Return the number of iterations for the complex sequence
    z -> z**2 + c to exceed 2.0, where c = x + iy.

    Points that have not escaped after ``max_iters`` iterations are
    treated as members of the Mandelbrot set and ``max_iters`` is
    returned.
    """
    c = complex(x, y)
    z = 0j
    for i in range(max_iters):
        z = z**2 + c
        # |z| > 2 guarantees divergence; compare |z|**2 against 4 to
        # avoid a square root inside the hot loop.
        if (z.real * z.real + z.imag * z.imag > 4.0):
            return i
    return max_iters
""" Explanation: Recall that the Mandelbrot set is the set of complex numbers
$c$ for which the sequence $z_n$ stays bounded, where the sequence starts
from $z_0 = 0$ and is generated from the map $$ z_{n+1} = z_n^2 + c.$$ First
we'll make a function to calculate how long before the sequence $z
\rightarrow z^2 + c$ diverges. As the condition for divergence, we'll check
to see when $|z|^2 > 4$. We will limit the check to some number max_iters,
perhaps 255. End of explanation """


@jit(nopython=True)
def make_fractal(xmin, xmax, ymin, ymax, data, max_iters):
    """
    Fill ``data`` (shape = (height, width)) with the escape count of the
    pixel-centre points of the rectangle [xmin, xmax] x [ymin, ymax].

    Row index j maps to y and column index i maps to x, matching how
    bokeh's ``image`` glyph displays a 2-D array.
    """
    height, width = data.shape
    dx = (xmax - xmin) / width
    dy = (ymax - ymin) / height
    for i in range(width):
        x = xmin + dx * (i + 0.5)  # sample at pixel centres
        for j in range(height):
            y = ymin + dy * (j + 0.5)
            data[j, i] = mandel(x, y, max_iters)
    return data
""" Explanation: Next we make a function to create the fractal. It will fill
a two-dimensional integer array data with the number of iterations before the
sequence diverged. Points inside the Mandelbrot set will have the value
max_iters. End of explanation """
# The x-range spans 3 units and the y-range 2 units, and the figure is drawn
# 768 px wide by 512 px tall, so allocate the array as (height, width) =
# (512, 768) to keep the sampling density the same along both axes.
# (Allocating it as (768, 512) would put 768 samples across the shorter
# y-axis and only 512 across the wider x-axis.)
N = 512, 768
data = np.zeros(N, np.uint8)

xmin, xmax, ymin, ymax = -2.0, 1.0, -1.0, 1.0
start = timer()
make_fractal(xmin, xmax, ymin, ymax, data, 255)
end = timer()
print("Generated fractal image in {time:.3f} ms".format(time = 1000 * (end - start)))

fig = bk.figure(x_range=[xmin, xmax], y_range=[ymin, ymax],
                width=768, height=512)
fig.image(image=[data], x=[xmin], y=[ymin],
          dw=[xmax-xmin], dh=[ymax-ymin], palette="YlOrBr9")
bk.show(fig)
""" Explanation: Now we'll generate a fractal. We'll generate an image 768 by
512, covering $-2 \le x\le +1$ and $-1 \le y \le +1$. We also put timer
commands around the function call so we can see how long it took. End of
explanation """


def calculate_plot(x, y, logscale):
    """Recompute and display the fractal centred on (x, y).

    The viewport is ``3 * 10**logscale`` wide and ``2 * 10**logscale``
    tall, preserving the figure's 3:2 aspect ratio while zooming.
    """
    width = 3 * 10 ** logscale
    height = 2 * 10 ** logscale
    xmin, xmax = x - width/2, x + width/2
    ymin, ymax = y - height/2, y + height/2
    start = timer()
    # Reuses the module-level `data` buffer allocated above.
    make_fractal(xmin, xmax, ymin, ymax, data, 255)
    end = timer()
    print("Generated fractal image in {time:.3f} ms".format(time = 1000 * (end - start)))
    fig = bk.figure(x_range=[xmin, xmax], y_range=[ymin, ymax],
                    width=768, height=512)
    fig.image(image=[data], x=[xmin], y=[ymin],
              dw=[xmax-xmin], dh=[ymax-ymin], palette="YlOrBr9")
    bk.show(fig)

calculate_plot(-1.4, 0, -2)
calculate_plot(-1.405, 0, -7)
""" Explanation: We can make this recalculate interactively by creating a
function that returns a plot. Since we want to zoom over many orders of
magnitude, the function arguments will be the center (x,y) and base-10
logarithm of the scale. End of explanation """
interact_manual(calculate_plot, x=FloatText(-0.003001005),
                y=FloatText(0.64400092), logscale=(-8, 0, 0.1))
""" Explanation: Using IPython widgets and the "interact_manual" command, we
can make this more interactive. Note that most of the delay is the time to
generate the graphical image and send it from the server. As you zoom in,
round-off error in the floating point precision of the math will cause
numerical artifacts. End of explanation """
mne-tools/mne-tools.github.io
0.22/_downloads/f760cc2f1a5d6c625b1e14a0b05176dd/plot_ecog.ipynb
bsd-3-clause
# Working with ECoG data in MNE: build a DigMontage from electrode
# coordinates, align it to a FreeSurfer subject, then visualize band power on
# a brain snapshot, as an animation, and projected onto the pial surface.
#
# Authors: Eric Larson <larson.eric.d@gmail.com>
#          Chris Holdgraf <choldgraf@gmail.com>
#          Adam Li <adam2392@gmail.com>
#
# License: BSD (3-clause)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

import mne
from mne.viz import plot_alignment, snapshot_brain_montage

print(__doc__)

# paths to mne datasets - sample ECoG and FreeSurfer subject
misc_path = mne.datasets.misc.data_path()
sample_path = mne.datasets.sample.data_path()
subject = 'sample'
subjects_dir = sample_path + '/subjects'
""" Explanation: Working with ECoG data MNE supports working with more than
just MEG and EEG data. Here we show some of the functions that can be used to
facilitate working with electrocorticography (ECoG) data. This example shows
how to use: ECoG data channel locations in subject's MRI space projection
onto a surface For an example that involves sEEG data, channel locations in
MNI space, or projection into a volume, see tut_working_with_seeg. End of
explanation """
# In this tutorial, the electrode coordinates are assumed to be in meters
elec_df = pd.read_csv(misc_path + '/ecog/sample_ecog_electrodes.tsv',
                      sep='\t', header=0, index_col=None)
ch_names = elec_df['name'].tolist()
ch_coords = elec_df[['x', 'y', 'z']].to_numpy(dtype=float)
# Map channel name -> (x, y, z) position, as expected by make_dig_montage.
ch_pos = dict(zip(ch_names, ch_coords))
# Ideally the nasion/LPA/RPA will also be present from the digitization, here
# we use fiducials estimated from the subject's FreeSurfer MNI transformation:
lpa, nasion, rpa = mne.coreg.get_mni_fiducials(
    subject, subjects_dir=subjects_dir)
# Keep only the 3-D position ('r') of each fiducial point.
lpa, nasion, rpa = lpa['r'], nasion['r'], rpa['r']
""" Explanation: Let's load some ECoG electrode locations and names, and turn
them into a :class:mne.channels.DigMontage class. First, use pandas to read
in the .tsv file. End of explanation """
# Positions are declared to be in FreeSurfer surface RAS ('mri') coordinates.
montage = mne.channels.make_dig_montage(
    ch_pos, coord_frame='mri', nasion=nasion, lpa=lpa, rpa=rpa)
print('Created %s channel positions' % len(ch_names))
""" Explanation: Now we make a :class:mne.channels.DigMontage stating that
the ECoG contacts are in the FreeSurfer surface RAS (i.e., MRI) coordinate
system. End of explanation """
# MRI -> head transform, applied when the montage is set on the data.
trans = mne.channels.compute_native_head_t(montage)
print(trans)
""" Explanation: Now we get the :term:trans that transforms from our MRI
coordinate system to the head coordinate frame. This transform will be
applied to the data when applying the montage so that standard plotting
functions like :func:mne.viz.plot_evoked_topomap will be aligned properly.
End of explanation """
# first we'll load in the sample dataset
raw = mne.io.read_raw_edf(misc_path + '/ecog/sample_ecog.edf')

# drop bad channels (anything not present in the electrode location table)
raw.info['bads'].extend([ch for ch in raw.ch_names if ch not in ch_names])
raw.load_data()
raw.drop_channels(raw.info['bads'])
raw.crop(0, 2)  # just process 2 sec of data for speed

# attach montage
raw.set_montage(montage)

# set channel types to ECoG (instead of EEG)
raw.set_channel_types({ch_name: 'ecog' for ch_name in raw.ch_names})
""" Explanation: Now that we have our montage, we can load in our
corresponding time-series data and set the montage to the raw data. End of
explanation """
# Render the electrodes on the pial surface and grab a 2-D snapshot; `xy`
# maps each channel to its pixel position in the image `im`.
fig = plot_alignment(raw.info, subject=subject, subjects_dir=subjects_dir,
                     surfaces=['pial'], trans=trans, coord_frame='mri')
mne.viz.set_3d_view(fig, 200, 70, focalpoint=[0, -0.005, 0.03])

xy, im = snapshot_brain_montage(fig, montage)
""" Explanation: We can then plot the locations of our electrodes on our
subject's brain. We'll use :func:~mne.viz.snapshot_brain_montage to save the
plot as image data (along with xy positions of each electrode in the image),
so that later we can plot frequency band power on top of it. <div
class="alert alert-info"><h4>Note</h4><p>These are not real electrodes for
this subject, so they do not align to the cortical surface
perfectly.</p></div> End of explanation """
# Band-limited power via the Hilbert envelope; *_power_t keeps the full time
# course, *_power averages it over time for the static plots below.
gamma_power_t = raw.copy().filter(30, 90).apply_hilbert(
    envelope=True).get_data()
alpha_power_t = raw.copy().filter(8, 12).apply_hilbert(
    envelope=True).get_data()
gamma_power = gamma_power_t.mean(axis=-1)
alpha_power = alpha_power_t.mean(axis=-1)
""" Explanation: Next, we'll compute the signal power in the gamma (30-90 Hz)
and alpha (8-12 Hz) bands. End of explanation """
# Convert from a dictionary to array to plot
xy_pts = np.vstack([xy[ch] for ch in raw.info['ch_names']])

# colormap to view spectral power
cmap = 'viridis'

# Create a 1x2 figure showing the average power in gamma and alpha bands.
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
# choose a colormap range wide enough for both frequency bands
_gamma_alpha_power = np.concatenate((gamma_power, alpha_power)).flatten()
vmin, vmax = np.percentile(_gamma_alpha_power, [10, 90])
for ax, band_power, band in zip(axs,
                                [gamma_power, alpha_power],
                                ['Gamma', 'Alpha']):
    ax.imshow(im)
    ax.set_axis_off()
    sc = ax.scatter(*xy_pts.T, c=band_power, s=200,
                    cmap=cmap, vmin=vmin, vmax=vmax)
    ax.set_title(f'{band} band power', size='x-large')
fig.colorbar(sc, ax=axs)
""" Explanation: Now let's use matplotlib to overplot frequency band power
onto the electrodes which can be plotted on top of the brain from
:func:~mne.viz.snapshot_brain_montage. End of explanation """
# create an initialization and animation function
# to pass to FuncAnimation
def init():
    """Create an empty frame."""
    return paths,


def animate(i, activity):
    """Animate the plot."""
    # Recolor the scatter points with frame i's per-channel activity.
    paths.set_array(activity[:, i])
    return paths,


# create the figure and apply the animation of the
# gamma frequency band activity
fig, ax = plt.subplots(figsize=(5, 5))
ax.imshow(im)
ax.set_axis_off()
paths = ax.scatter(*xy_pts.T, c=np.zeros(len(xy_pts)), s=200,
                   cmap=cmap, vmin=vmin, vmax=vmax)
fig.colorbar(paths, ax=ax)
ax.set_title('Gamma frequency over time (Hilbert transform)',
             size='large')

# avoid edge artifacts and decimate, showing just a short chunk
sl = slice(100, 150)
show_power = gamma_power_t[:, sl]
anim = animation.FuncAnimation(fig, animate, init_func=init,
                               fargs=(show_power,),
                               frames=show_power.shape[1],
                               interval=100, blit=True)
""" Explanation: Say we want to visualize the evolution of the power in the
gamma band, instead of just plotting the average. We can use
matplotlib.animation.FuncAnimation to create an animation and apply this to
the brain figure. End of explanation """
# Project the same chunk of gamma power onto the nearest pial-surface
# locations and display it as a source estimate.
evoked = mne.EvokedArray(
    gamma_power_t[:, sl], raw.info, tmin=raw.times[sl][0])
stc = mne.stc_near_sensors(evoked, trans, subject,
                           subjects_dir=subjects_dir)
clim = dict(kind='value', lims=[vmin * 0.9, vmin, vmax])
brain = stc.plot(surface='pial', hemi='both', initial_time=0.68,
                 colormap='viridis', clim=clim, views='parietal',
                 subjects_dir=subjects_dir, size=(500, 500))
# You can save a movie like the one on our documentation website with:
# brain.save_movie(time_dilation=50, interpolation='linear', framerate=10,
#                  time_viewer=True)
""" Explanation: Alternatively, we can project the sensor data to the nearest
locations on the pial surface and visualize that: End of explanation """
CrowdTruth/CrowdTruth-core
tutorial/notebooks/Custom Platform - Multiple Choice Task - Person Type Annotation in Video.ipynb
apache-2.0
# CrowdTruth tutorial: apply the CrowdTruth metrics to a closed multiple-
# choice task (person-type annotation in video) collected on a custom
# platform, then inspect unit, worker, and annotation quality scores.
import pandas as pd

# Raw crowd judgments exported from the custom platform (one row per
# judgment).
test_data = pd.read_csv("../data/custom-platform-person-video-multiple-choice.csv")
test_data.head()
""" Explanation: CrowdTruth for Multiple Choice Tasks: Person Type Annotation
in Video on a Custom Platform In this tutorial, we will apply CrowdTruth
metrics to a multiple choice crowdsourcing task for Person Type Annotation
from video fragments. The workers were asked to watch a video of about 3-5
seconds and then pick from a multiple choice list which are the types of
person that appear in the video fragment. The task was executed on a custom
platform. For more crowdsourcing annotation task examples, click here. This
is a screenshot of the task as it appeared to workers: " A sample dataset for
this task is available in this file, containing raw output from the crowd on
FigureEight. Download the file and place it in a folder named data that has
the same root as this notebook. Now you can check your data: End of
explanation """
import crowdtruth
from crowdtruth.configuration import DefaultConfig
""" Explanation: Declaring a pre-processing configuration The pre-processing
configuration defines how to interpret the raw crowdsourcing input. To do
this, we need to define a configuration class. First, we import the default
CrowdTruth configuration class: End of explanation """


class TestConfig(DefaultConfig):
    # Task-specific configuration: which .csv columns hold inputs/outputs.
    inputColumns = ["videolocation", "subtitles", "imagetags", "subtitletags"]
    outputColumns = ["selected_answer"]
    # Standard annotation columns for a non-AMT/non-FigureEight platform, in
    # the order: judgment id, unit id, worker id, started time, submitted
    # time.
    customPlatformColumns = ["judgmentId", "unitId", "workerId", "startedAt", "submittedAt"]

    # processing of a closed task
    open_ended_task = False
    # Closed list of allowed crowd answers (required when the task is not
    # open-ended).
    annotation_vector = ["archeologist", "architect", "artist", "astronaut",
                         "athlete", "businessperson", "celebrity", "chef",
                         "criminal", "engineer", "farmer",
                         "fictionalcharacter", "journalist", "judge",
                         "lawyer", "militaryperson", "model", "monarch",
                         "philosopher", "politician", "presenter", "producer",
                         "psychologist", "scientist", "sportsmanager",
                         "writer", "none", "other"]

    def processJudgments(self, judgments):
        # pre-process output to match the values in annotation_vector
        for col in self.outputColumns:
            # transform to lowercase
            judgments[col] = judgments[col].apply(lambda x: str(x).lower())
            # remove square brackets from annotations
            judgments[col] = judgments[col].apply(lambda x: str(x).replace('[',''))
            judgments[col] = judgments[col].apply(lambda x: str(x).replace(']',''))
            # remove the quotes around the annotations
            judgments[col] = judgments[col].apply(lambda x: str(x).replace('"',''))
        return judgments
""" Explanation: Our test class inherits the default configuration
DefaultConfig, while also declaring some additional attributes that are
specific to the Person Type Annotation in Video task: inputColumns: list of
input columns from the .csv file with the input data outputColumns: list of
output columns from the .csv file with the answers from the workers
customPlatformColumns: a list of columns from the .csv file that defines a
standard annotation tasks, in the following order - judgment id, unit id,
worker id, started time, submitted time. This variable is used for input
files that do not come from AMT or FigureEight (formarly known as
CrowdFlower). annotation_separator: string that separates between the crowd
annotations in outputColumns open_ended_task: boolean variable defining
whether the task is open-ended (i.e. the possible crowd annotations are not
known beforehand, like in the case of free text input); in the task that we
are processing, workers pick the answers from a pre-defined list, therefore
the task is not open ended, and this variable is set to False
annotation_vector: list of possible crowd answers, mandatory to declare when
open_ended_task is False; for our task, this is the list of relations
processJudgments: method that defines processing of the raw crowd data; for
this task, we process the crowd answers to correspond to the values in
annotation_vector The complete configuration class is declared below: End of
explanation """
# Load and pre-process the raw crowd data according to TestConfig.
data, config = crowdtruth.load(
    file = "../data/custom-platform-person-video-multiple-choice.csv",
    config = TestConfig()
)

data['judgments'].head()
""" Explanation: Pre-processing the input data After declaring the
configuration of our input file, we are ready to pre-process the crowd data:
End of explanation """
# Compute the CrowdTruth quality metrics (units, workers, annotations).
results = crowdtruth.run(data, config)
""" Explanation: Computing the CrowdTruth metrics The pre-processed data can
then be used to calculate the CrowdTruth metrics: End of explanation """
results["units"].head()
""" Explanation: results is a dict object that contains the quality metrics
for video fragments, annotations and crowd workers. The video fragments
metrics are stored in results["units"]: End of explanation """
import matplotlib.pyplot as plt
%matplotlib inline

# Histogram of the unit quality scores (uqs): worker agreement per fragment.
plt.hist(results["units"]["uqs"])
plt.xlabel("Sentence Quality Score")
plt.ylabel("Sentences")
""" Explanation: The uqs column in results["units"] contains the video
fragment quality scores, capturing the overall workers agreement over each
video fragment. Here we plot its histogram: End of explanation """
results["units"]["unit_annotation_score"].head()
""" Explanation: The unit_annotation_score column in results["units"]
contains the video fragment-annotation scores, capturing the likelihood that
an annotation is expressed in a video fragment. For each video fragment, we
store a dictionary mapping each annotation to its video fragment-annotation
score. End of explanation """
results["workers"].head()
""" Explanation: The worker metrics are stored in results["workers"]: End of
explanation """
# Histogram of the worker quality scores (wqs).
plt.hist(results["workers"]["wqs"])
plt.xlabel("Worker Quality Score")
plt.ylabel("Workers")
""" Explanation: The wqs columns in results["workers"] contains the worker
quality scores, capturing the overall agreement between one worker and all
the other workers. End of explanation """
results["annotations"]
""" Explanation: The annotation metrics are stored in results["annotations"].
The aqs column contains the annotation quality scores, capturing the overall
worker agreement over one annotation. End of explanation """
GoogleCloudPlatform/asl-ml-immersion
notebooks/image_models/solutions/2_mnist_models_vertex.ipynb
apache-2.0
import os from datetime import datetime REGION = "us-central1" PROJECT = !(gcloud config get-value core/project) PROJECT = PROJECT[0] BUCKET = PROJECT MODEL_TYPE = "cnn" # "linear", "dnn", "dnn_dropout", or "cnn" # Do not change these os.environ["PROJECT"] = PROJECT os.environ["BUCKET"] = BUCKET os.environ["REGION"] = REGION os.environ["MODEL_TYPE"] = MODEL_TYPE """ Explanation: MNIST Image Classification with TensorFlow on Vertex AI This notebook demonstrates how to implement different image models on MNIST using the tf.keras API. Learning Objectives Understand how to build a Dense Neural Network (DNN) for image classification Understand how to use dropout (DNN) for image classification Understand how to use Convolutional Neural Networks (CNN) Know how to deploy and use an image classifcation model using Google Cloud's Vertex AI First things first. Configure the parameters below to match your own Google Cloud project details. End of explanation """ %%writefile mnist_models/trainer/task.py import argparse import json import os import sys from . import model def _parse_arguments(argv): """Parses command-line arguments.""" parser = argparse.ArgumentParser() parser.add_argument( '--model_type', help='Which model type to use', type=str, default='linear') parser.add_argument( '--epochs', help='The number of epochs to train', type=int, default=10) parser.add_argument( '--steps_per_epoch', help='The number of steps per epoch to train', type=int, default=100) parser.add_argument( '--job-dir', help='Directory where to save the given model', type=str, default='mnist_models/') return parser.parse_known_args(argv) def main(): """Parses command line arguments and kicks off model training.""" args = _parse_arguments(sys.argv[1:])[0] # Configure path for hyperparameter tuning. 
trial_id = json.loads( os.environ.get('TF_CONFIG', '{}')).get('task', {}).get('trial', '') output_path = args.job_dir if not trial_id else args.job_dir + '/' model_layers = model.get_layers(args.model_type) image_model = model.build_model(model_layers, args.job_dir) model_history = model.train_and_evaluate( image_model, args.epochs, args.steps_per_epoch, args.job_dir) if __name__ == '__main__': main() """ Explanation: Building a dynamic model In the previous notebook, <a href="mnist_linear.ipynb">mnist_linear.ipynb</a>, we ran our code directly from the notebook. In order to run it on Vertex AI, it needs to be packaged as a python module. The boilerplate structure for this module has already been set up in the folder mnist_models. The module lives in the sub-folder, trainer, and is designated as a python package with the empty __init__.py (mnist_models/trainer/__init__.py) file. It still needs the model and a trainer to run it, so let's make them. Let's start with the trainer file first. This file parses command line arguments to feed into the model. End of explanation """ %%writefile mnist_models/trainer/util.py import tensorflow as tf def scale(image, label): """Scales images from a 0-255 int range to a 0-1 float range""" image = tf.cast(image, tf.float32) image /= 255 image = tf.expand_dims(image, -1) return image, label def load_dataset( data, training=True, buffer_size=5000, batch_size=100, nclasses=10): """Loads MNIST dataset into a tf.data.Dataset""" (x_train, y_train), (x_test, y_test) = data x = x_train if training else x_test y = y_train if training else y_test # One-hot encode the classes y = tf.keras.utils.to_categorical(y, nclasses) dataset = tf.data.Dataset.from_tensor_slices((x, y)) dataset = dataset.map(scale).batch(batch_size) if training: dataset = dataset.shuffle(buffer_size).repeat() return dataset """ Explanation: Next, let's group non-model functions into a util file to keep the model file simple. 
We'll copy over the scale and load_dataset functions from the previous lab. End of explanation """ %%writefile mnist_models/trainer/model.py import os import shutil import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from tensorflow.keras import Sequential from tensorflow.keras.callbacks import TensorBoard from tensorflow.keras.layers import ( Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Softmax) from . import util # Image Variables WIDTH = 28 HEIGHT = 28 def get_layers( model_type, nclasses=10, hidden_layer_1_neurons=400, hidden_layer_2_neurons=100, dropout_rate=0.25, num_filters_1=64, kernel_size_1=3, pooling_size_1=2, num_filters_2=32, kernel_size_2=3, pooling_size_2=2): """Constructs layers for a keras model based on a dict of model types.""" model_layers = { 'linear': [ Flatten(), Dense(nclasses), Softmax() ], 'dnn': [ Flatten(), Dense(hidden_layer_1_neurons, activation='relu'), Dense(hidden_layer_2_neurons, activation='relu'), Dense(nclasses), Softmax() ], 'dnn_dropout': [ Flatten(), Dense(hidden_layer_1_neurons, activation='relu'), Dense(hidden_layer_2_neurons, activation='relu'), Dropout(dropout_rate), Dense(nclasses), Softmax() ], 'cnn': [ Conv2D(num_filters_1, kernel_size=kernel_size_1, activation='relu', input_shape=(WIDTH, HEIGHT, 1)), MaxPooling2D(pooling_size_1), Conv2D(num_filters_2, kernel_size=kernel_size_2, activation='relu'), MaxPooling2D(pooling_size_2), Flatten(), Dense(hidden_layer_1_neurons, activation='relu'), Dense(hidden_layer_2_neurons, activation='relu'), Dropout(dropout_rate), Dense(nclasses), Softmax() ] } return model_layers[model_type] def build_model(layers, output_dir): """Compiles keras model for image classification.""" model = Sequential(layers) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) return model def train_and_evaluate(model, num_epochs, steps_per_epoch, output_dir): """Compiles keras model and loads data into it for training.""" mnist = 
tf.keras.datasets.mnist.load_data() train_data = util.load_dataset(mnist) validation_data = util.load_dataset(mnist, training=False) callbacks = [] if output_dir: tensorboard_callback = TensorBoard(log_dir=output_dir) callbacks = [tensorboard_callback] history = model.fit( train_data, validation_data=validation_data, epochs=num_epochs, steps_per_epoch=steps_per_epoch, verbose=2, callbacks=callbacks) if output_dir: export_path = os.path.join(output_dir, 'keras_export') model.save(export_path, save_format='tf') return history """ Explanation: Finally, let's code the models! The tf.keras API accepts an array of layers into a model object, so we can create a dictionary of layers based on the different model types we want to use. The below file has two functions: get_layers and create_and_train_model. We will build the structure of our model in get_layers. Last but not least, we'll copy over the training code from the previous lab into train_and_evaluate. TODO 1: Define the Keras layers for a DNN model TODO 2: Define the Keras layers for a dropout model TODO 3: Define the Keras layers for a CNN model Hint: These models progressively build on each other. Look at the imported tensorflow.keras.layers modules and the default values for the variables defined in get_layers for guidance. End of explanation """ !python3 -m mnist_models.trainer.test """ Explanation: Local Training With everything set up, let's run locally to test the code. Some of the previous tests have been copied over into a testing script mnist_models/trainer/test.py to make sure the model still passes our previous checks. On line 13, you can specify which model types you would like to check. line 14 and line 15 has the number of epochs and steps per epoch respectively. Moment of truth! Run the code below to check your models against the unit tests. If you see "OK" at the end when it's finished running, congrats! You've passed the tests! 
End of explanation """ current_time = datetime.now().strftime("%Y%m%d_%H%M%S") model_type = "cnn" os.environ["MODEL_TYPE"] = model_type os.environ["JOB_DIR"] = "mnist_models/models/{}_{}/".format( model_type, current_time ) """ Explanation: Now that we know that our models are working as expected, let's run it on Google Cloud within Vertex AI. We can run it as a python module locally first using the command line. The below cell transfers some of our variables to the command line as well as create a job directory including a timestamp. End of explanation """ %%bash python3 -m mnist_models.trainer.task \ --job-dir=$JOB_DIR \ --epochs=5 \ --steps_per_epoch=50 \ --model_type=$MODEL_TYPE """ Explanation: The cell below runs the local version of the code. The epochs and steps_per_epoch flag can be changed to run for longer or shorther, as defined in our mnist_models/trainer/task.py file. End of explanation """ %%writefile mnist_models/setup.py from setuptools import find_packages from setuptools import setup setup( name='mnist_trainer', version='0.1', packages=find_packages(), include_package_data=True, description='MNIST model training application.' ) %%bash cd mnist_models python ./setup.py sdist --formats=gztar cd .. gsutil cp mnist_models/dist/mnist_trainer-0.1.tar.gz gs://${BUCKET}/mnist/ """ Explanation: Training on the cloud For this model, we will be able to use a Tensorflow pre-built container on Vertex AI, as we do not have any particular additional prerequisites. As before, we use setuptools for this, and store the created source distribution on Cloud Storage. 
End of explanation """ current_time = datetime.now().strftime("%Y%m%d_%H%M%S") model_type = "cnn" os.environ["MODEL_TYPE"] = model_type os.environ["JOB_DIR"] = "gs://{}/mnist_{}_{}/".format( BUCKET, model_type, current_time ) os.environ["JOB_NAME"] = f"mnist_{model_type}_{current_time}" %%bash echo $JOB_DIR $REGION $JOB_NAME PYTHON_PACKAGE_URIS=gs://${BUCKET}/mnist/mnist_trainer-0.1.tar.gz MACHINE_TYPE=n1-standard-4 REPLICA_COUNT=1 PYTHON_PACKAGE_EXECUTOR_IMAGE_URI="us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-3:latest" PYTHON_MODULE=trainer.task WORKER_POOL_SPEC="machine-type=$MACHINE_TYPE,\ replica-count=$REPLICA_COUNT,\ executor-image-uri=$PYTHON_PACKAGE_EXECUTOR_IMAGE_URI,\ python-module=$PYTHON_MODULE" gcloud ai custom-jobs create \ --region=${REGION} \ --display-name=$JOB_NAME \ --python-package-uris=$PYTHON_PACKAGE_URIS \ --worker-pool-spec=$WORKER_POOL_SPEC \ --args="--job-dir=$JOB_DIR,--model_type=$MODEL_TYPE" %%bash SAVEDMODEL_DIR=${JOB_DIR}keras_export echo $SAVEDMODEL_DIR gsutil ls $SAVEDMODEL_DIR """ Explanation: Then, we can kickoff the Vertex AI Custom Job using the pre-built container. We can pass our source distribution URI using the --python-package-uris flag. 
End of explanation """ %%bash TIMESTAMP=$(date -u +%Y%m%d_%H%M%S) MODEL_DISPLAYNAME=mnist_$TIMESTAMP ENDPOINT_DISPLAYNAME=mnist_endpoint_$TIMESTAMP IMAGE_URI="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-3:latest" SAVEDMODEL_DIR=${JOB_DIR}keras_export echo $SAVEDMODEL_DIR # Model MODEL_RESOURCENAME=$(gcloud ai models upload \ --region=$REGION \ --display-name=$MODEL_DISPLAYNAME \ --container-image-uri=$IMAGE_URI \ --artifact-uri=$SAVEDMODEL_DIR \ --format="value(model)") echo "MODEL_DISPLAYNAME=${MODEL_DISPLAYNAME}" echo "MODEL_RESOURCENAME=${MODEL_RESOURCENAME}" # Endpoint ENDPOINT_RESOURCENAME=$(gcloud ai endpoints create \ --region=$REGION \ --display-name=$ENDPOINT_DISPLAYNAME \ --format="value(name)") echo "ENDPOINT_DISPLAYNAME=${ENDPOINT_DISPLAYNAME}" echo "ENDPOINT_RESOURCENAME=${ENDPOINT_RESOURCENAME}" # Deployment DEPLOYED_MODEL_DISPLAYNAME=${MODEL_DISPLAYNAME}_deployment MACHINE_TYPE=n1-standard-2 gcloud ai endpoints deploy-model $ENDPOINT_RESOURCENAME \ --region=$REGION \ --model=$MODEL_RESOURCENAME \ --display-name=$DEPLOYED_MODEL_DISPLAYNAME \ --machine-type=$MACHINE_TYPE \ --min-replica-count=1 \ --max-replica-count=1 \ --traffic-split=0=100 """ Explanation: Deploying and predicting with model Once you have a model you're proud of, let's deploy it! All we need to do is to upload the created model artifact from Cloud Storage to Vertex AI as a model, create a new endpoint, and deploy the model to the endpoint. End of explanation """ import codecs import json import matplotlib.pyplot as plt import tensorflow as tf HEIGHT = 28 WIDTH = 28 IMGNO = 12 mnist = tf.keras.datasets.mnist.load_data() (x_train, y_train), (x_test, y_test) = mnist test_image = x_test[IMGNO] jsondata = {"instances": [test_image.reshape(HEIGHT, WIDTH, 1).tolist()]} json.dump(jsondata, codecs.open("test.json", "w", encoding="utf-8")) plt.imshow(test_image.reshape(HEIGHT, WIDTH)); !cat test.json """ Explanation: To predict with the model, let's take one of the example images. 
TODO 4: Write a .json file with image data to send to a Vertex AI deployed model End of explanation """ %%bash ENDPOINT_RESOURCENAME= # TODO: insert ENDPOINT_RESOURCENAME from above gcloud ai endpoints predict $ENDPOINT_RESOURCENAME \ --region=$REGION \ --json-request=test.json """ Explanation: Finally, we can send it to the prediction service. The output will have a 1 in the index of the corresponding digit it is predicting. Congrats! You've completed the lab! End of explanation """
TomTranter/OpenPNM
examples/tutorials/Working with Mixtures.ipynb
mit
import openpnm as op ws = op.Workspace() ws.settings['loglevel'] = 40 """ Explanation: Working with Mixtures In version 2.1, OpenPNM introduced a new Mixture class, which as the name suggests, combines the properties of several phases into a single mixture. The most common example would be diffusion of oxygen in air, which is of course a mixture of $O_2$ and $N_2$ (ignoring humidity and other minor gases like $CO_2$). The basic premise is that you create normal OpenPNM Phase object for each of the pure components, then create a Mixture object where you specify the composition of each species. The mixture object then provides an interface to manage the properties of each species, such as setting the composition or calculating the molar mass of the mixture. The notebook gives an overview of how this Mixture class works. What problems does the Mixture class solve? It actually solves three problems or points of confusion, which can be illustrated by considering the diffusion of oxygen in air. In traditional OpenPNM scripts, a user creates a GenericPhase object, called air, and specifies a diffusion coefficient (say 2.05e-5 m2/s). This air object is then used in the FickianDiffusion algorithm which finds 'pore.concentration', but only the user actually knows which species this refers to. OpenPNM just solves the problem with given boundary conditions. Assuming air has a total molar concentration of 40,000 mol/m3, by putting a boundary condition of 5000 mol/m3, you are implicitly telling OpenPNM to solve for oxygen. Had you put 35000 mol/m3, you'd have solved for nitrogen concentration. The new mixture class is fully compatible with existing algorithms, but you would need to override the default quantity from 'pore.concentration' to 'pore.concentration.oxygen'. Nothing really changes except it is now fully explicity and transparent what is being solved for. The second benefit is for calculating physical properties of the phase. 
A traditional GenericPhase object does not know the physical properties of it's components. Consider the Fuller correlation for binary diffusion coefficients. It requires the "molar diffusion volume" for each species, which are tabulated in handbooks. The traditional Fuller diffusion model in the OpenPNM.models library accepts these values as arguments. A new model has been added that looks for these values stored on the each of the component objects. So the new approach allows for more automated and consistent calculation of mixture properties, rather than manually specifying them. Again, this is most a matter of clarity and convenience. Finally, the mixture class allows for the specification of mulitple concentations. When dealing with a binary phase like are it's possible to implicitly assume that 'pore.concentration' refers to oxygen, and that the nitrogen concentration can be found. In a mixture with three or more components knowing a single composition is no longer sufficient, so it becomes a matter of necessity to specify multiple concentrations, which the mixture class allows by appending the component name to the end of the property (i.e. 'pore.concentration.oxygen' and 'pore.concentration.nitrogen' and 'pore.concentration.water_vapor') End of explanation """ pn = op.network.Cubic(shape=[4, 4], spacing=0.001) geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts) """ Explanation: Start by defining a simple 2D network (for easier visualization): End of explanation """ from openpnm.phases import mixtures O2 = mixtures.species.gases.O2(network=pn, name='oxygen') N2 = mixtures.species.gases.N2(network=pn, name='nitrogen') """ Explanation: In principle, you can define the two pure species as GenericPhase objects, but this leads to problems later since you have to add all the needed physical properties (i.e. molecular weight). A better option is to use the the pre-defined classes in the OpenPNM.mixture submodule. 
Note that this is not imported with OpenPNM by default so you must import it explicitly: End of explanation """ print(O2) """ Explanation: These species objects do not have many pre-defined properties, but this could grow in the future. End of explanation """ O2['pore.critical_temperature'] = 154.581 O2['pore.critical_pressure'] = 5043000.0 N2['pore.critical_temperature'] = 126.21 N2['pore.critical_pressure'] = 3390000.0 print(O2) """ Explanation: It's also possible for users to add their own specific properties to each species. For instance, if you have a correlation that requires the critical temperature and/or pressure of the components, you could easily add: End of explanation """ air = mixtures.GenericMixture(network=pn, components=[N2, O2]) """ Explanation: With the two 'pure' phases defined, we can now create the mixture phase. End of explanation """ print(air) """ Explanation: Now we can print the air object and see how the properties of the mixture are represented, as well as a list of the components that make the phas (at the bottom): End of explanation """ air.components.keys() """ Explanation: If you have the handle to a mixture, but not to the components, they can be retrieved from the components attribute, which is a dictionary. 
End of explanation """ O2 = air.components['oxygen'] N2 = air.components['nitrogen'] """ Explanation: From the printout of air above we can see that the two components are named 'oxygen' and 'nitrogen' so we can do the following: End of explanation """ air.set_component(component=O2, mode='remove') print("After deleting O2, there is just N2:") print(air.components.keys()) air.set_component(component=O2, mode='add') print("And O2 can be readded:") print(air.components.keys()) """ Explanation: It's possible to add and remove components after instantiation using the set_component method: End of explanation """ air.set_mole_fraction(component=O2, values=0.21) """ Explanation: Of course, the air object needs to know the concentration of each species. The Mixture class has a method for setting this. End of explanation """ print(air['pore.mole_fraction.oxygen']) """ Explanation: As can be seen above, the 'pore.mole_fraction' property has the pure component name appended to the end so we can tell them apart. We can also look at the values within each array to confirm they are correct: End of explanation """ print(air['pore.mole_fraction.nitrogen']) """ Explanation: Note that you only need to specify N-1 mole fractions and the N-th one can be determined. As N2 composition is not yet specified it will be all nans. End of explanation """ air.update_mole_fractions() print(air['pore.mole_fraction.nitrogen']) """ Explanation: But the update_mole_fractions method will find the component with nans as set them to the necessary value for the summation of mole fractions to be 1.0 in all pores. 
End of explanation """ air['pore.molar_mass'] """ Explanation: The mixture object also has a few pore-scale models pre-added, such as the ability to find the molecular mass of the mixture: End of explanation """ air['pore.molecular_weight.oxygen'] """ Explanation: The molar mass model uses the mole fraction of each component on the mixture object (illustrated above) and also looks up the molecular weight from each individual species. This is why it's helpful to use the pre-defined species objects in the mixtures submodule since they have some properties of the pure species included. The mixture object is able to access the information of it's components using the following: End of explanation """ phys = op.physics.GenericPhysics(network=pn, phase=air, geometry=geo) """ Explanation: Now let's see the mixture class in action with a FickianDiffusion algorithm. First let's define a physics object: End of explanation """ N2['pore.molar_diffusion_volume'] mod = op.models.phases.diffusivity.fuller air.add_model(propname='pore.diffusivity_old', model=mod, MA=0.032, MB=0.028, vA=16.6, vB=17.9) print(air['pore.diffusivity_old']) """ Explanation: Before add a pore-scale model for diffusive conductance, however, let's consider the diffusion coefficient. We know that the diffusion coefficient of O2 in air is 2.05e-5 m2/s, so we could just hard code that in. But a better way is to use the Fuller correlation. This is implemented in OpenPNM in 2 ways. The first way requires passing in the molar diffusion volume and molecular mass of each species as arguments: End of explanation """ mod = op.models.phases.mixtures.fuller_diffusivity air.add_model(propname='pore.diffusivity', model=mod) print(air['pore.diffusivity']) """ Explanation: This produces a pretty good estimate, but requires looking up the molar diffusion volumes and masses. 
The species objects already have this information on them, so OpenPNM provides a second version of the Fuller correlation that automatically retrieves it: End of explanation """ phys.add_model(propname='throat.diffusive_conductance', model=op.models.physics.diffusive_conductance.ordinary_diffusion) fd = op.algorithms.FickianDiffusion(network=pn) fd.setup(phase=air, quantity='pore.concentration.oxgyen') fd.set_value_BC(pores=pn.pores('left'), values=1) fd.set_value_BC(pores=pn.pores('right'), values=0) fd.run() """ Explanation: As can be seen, this is much cleaner and produces the same numbers. Now we can add the diffusive conductance model to the physics object and run the diffusion simulation: End of explanation """ print(fd) """ Explanation: Printing the algorithm object reveals that it did indeed solve for 'pore.concentration.oxygen' as desired. End of explanation """
VectorBlox/PYNQ
Pynq-Z1/notebooks/examples/pmod_oled.ipynb
bsd-3-clause
from pynq import Overlay from pynq.iop import Pmod_OLED from pynq.iop import PMODA ol = Overlay("base.bit") ol.download() pmod_oled = Pmod_OLED(PMODA) pmod_oled.clear() pmod_oled.write('Welcome to the\nPynq-Z1 board!') """ Explanation: Displaying text on a PmodOLED This demonstration shows how to display text on a PmodOLED using the Pynq-Z1 board. The Digilent Pmod OLED is required. In this example it should be connected to PMODA. End of explanation """ pmod_oled.clear() pmod_oled.write('Python and Zynq\nproductivity & performance') """ Explanation: You should now see the text output on the OLED, so let's try another message End of explanation """ def get_ip_address(): ipaddr_slist = !hostname -I ipaddr = (ipaddr_slist.s).split(" ")[0] return str(ipaddr) pmod_oled.clear() pmod_oled.write(get_ip_address()) """ Explanation: Finally, capture some text from IPython shell calls and print out to OLED End of explanation """
phuongxuanpham/SelfDrivingCar
CarND-Keras-Lab/traffic-sign-classification-with-keras.ipynb
gpl-3.0
from urllib.request import urlretrieve from os.path import isfile from tqdm import tqdm class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile('train.p'): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Train Dataset') as pbar: urlretrieve( 'https://s3.amazonaws.com/udacity-sdc/datasets/german_traffic_sign_benchmark/train.p', 'train.p', pbar.hook) if not isfile('test.p'): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Test Dataset') as pbar: urlretrieve( 'https://s3.amazonaws.com/udacity-sdc/datasets/german_traffic_sign_benchmark/test.p', 'test.p', pbar.hook) print('Training and Test data downloaded.') """ Explanation: Traffic Sign Classification with Keras Keras exists to make coding deep neural networks simpler. To demonstrate just how easy it is, you’re going to use Keras to build a convolutional neural network in a few dozen lines of code. You’ll be connecting the concepts from the previous lessons to the methods that Keras provides. Dataset The network you'll build with Keras is similar to the example in Keras’s GitHub repository that builds out a convolutional neural network for MNIST. However, instead of using the MNIST dataset, you're going to use the German Traffic Sign Recognition Benchmark dataset that you've used previously. You can download pickle files with sanitized traffic sign data here: End of explanation """ import pickle import numpy as np import math # Fix error with TF and Keras import tensorflow as tf tf.python.control_flow_ops = tf print('Modules loaded.') """ Explanation: Overview Here are the steps you'll take to build the network: Load the training data. Preprocess the data. Build a feedforward neural network to classify traffic signs. Build a convolutional neural network to classify traffic signs. Evaluate the final neural network on testing data. 
Keep an eye on the network’s accuracy over time. Once the accuracy reaches the 98% range, you can be confident that you’ve built and trained an effective model. End of explanation """ with open('train.p', 'rb') as f: data = pickle.load(f) # TODO: Load the feature data to the variable X_train X_train = data['features'] # TODO: Load the label data to the variable y_train y_train = data['labels'] # STOP: Do not change the tests below. Your implementation should pass these tests. assert np.array_equal(X_train, data['features']), 'X_train not set to data[\'features\'].' assert np.array_equal(y_train, data['labels']), 'y_train not set to data[\'labels\'].' print('Tests passed.') """ Explanation: Load the Data Start by importing the data from the pickle file. End of explanation """ # TODO: Shuffle the data from sklearn.utils import shuffle X_train, y_train = shuffle(X_train, y_train) # STOP: Do not change the tests below. Your implementation should pass these tests. assert X_train.shape == data['features'].shape, 'X_train has changed shape. The shape shouldn\'t change when shuffling.' assert y_train.shape == data['labels'].shape, 'y_train has changed shape. The shape shouldn\'t change when shuffling.' assert not np.array_equal(X_train, data['features']), 'X_train not shuffled.' assert not np.array_equal(y_train, data['labels']), 'y_train not shuffled.' print('Tests passed.') """ Explanation: Preprocess the Data Shuffle the data Normalize the features using Min-Max scaling between -0.5 and 0.5 One-Hot Encode the labels Shuffle the data Hint: You can use the scikit-learn shuffle function to shuffle the data. 
End of explanation """ # TODO: Normalize the data features to the variable X_normalized def normalize(X, a=0, b=1): """ Normalize the image data with Min-Max scaling to a range of [0.1, 0.9] :param image_data: The image data to be normalized :return: Normalized image data """ # TODO: Implement Min-Max scaling for grayscale image data # feature range [a, b] X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (b - a) + a return X_scaled X_normalized = normalize(X_train, a=-0.5, b=0.5) # STOP: Do not change the tests below. Your implementation should pass these tests. assert math.isclose(np.min(X_normalized), -0.5, abs_tol=1e-5) and math.isclose(np.max(X_normalized), 0.5, abs_tol=1e-5), 'The range of the training data is: {} to {}. It must be -0.5 to 0.5'.format(np.min(X_normalized), np.max(X_normalized)) print('Tests passed.') """ Explanation: Normalize the features Hint: You solved this in TensorFlow lab Problem 1. End of explanation """ # TODO: One Hot encode the labels to the variable y_one_hot from sklearn.preprocessing import LabelBinarizer lb = LabelBinarizer() y_one_hot = lb.fit_transform(y_train) # STOP: Do not change the tests below. Your implementation should pass these tests. import collections assert y_one_hot.shape == (39209, 43), 'y_one_hot is not the correct shape. It\'s {}, it should be (39209, 43)'.format(y_one_hot.shape) assert next((False for y in y_one_hot if collections.Counter(y) != {0: 42, 1: 1}), True), 'y_one_hot not one-hot encoded.' print('Tests passed.') """ Explanation: One-Hot Encode the labels Hint: You can use the scikit-learn LabelBinarizer function to one-hot encode the labels. End of explanation """ from keras.models import Sequential from keras.layers.core import Dense, Activation, Flatten model = Sequential() # TODO: Build a Multi-layer feedforward neural network with Keras here. 
# 1st Layer - Add a flatten layer model.add(Flatten(input_shape=(32, 32, 3))) # 2nd Layer - Add a fully connected layer model.add(Dense(128)) # 3rd Layer - Add a ReLU activation layer model.add(Activation('relu')) # 4th Layer - Add a fully connected layer model.add(Dense(43)) # 5th Layer - Add a softmax activation layer model.add(Activation('softmax')) # STOP: Do not change the tests below. Your implementation should pass these tests. from keras.layers.core import Dense, Activation, Flatten from keras.activations import relu, softmax def check_layers(layers, true_layers): assert len(true_layers) != 0, 'No layers found' for layer_i in range(len(layers)): assert isinstance(true_layers[layer_i], layers[layer_i]), 'Layer {} is not a {} layer'.format(layer_i+1, layers[layer_i].__name__) assert len(true_layers) == len(layers), '{} layers found, should be {} layers'.format(len(true_layers), len(layers)) check_layers([Flatten, Dense, Activation, Dense, Activation], model.layers) assert model.layers[0].input_shape == (None, 32, 32, 3), 'First layer input shape is wrong, it should be (32, 32, 3)' assert model.layers[1].output_shape == (None, 128), 'Second layer output is wrong, it should be (128)' assert model.layers[2].activation == relu, 'Third layer not a relu activation layer' assert model.layers[3].output_shape == (None, 43), 'Fourth layer output is wrong, it should be (43)' assert model.layers[4].activation == softmax, 'Fifth layer not a softmax activation layer' print('Tests passed.') """ Explanation: Keras Sequential Model ```python from keras.models import Sequential Create the Sequential model model = Sequential() `` Thekeras.models.Sequentialclass is a wrapper for the neural network model. Just like many of the class models in scikit-learn, it provides common functions likefit(),evaluate(), andcompile()`. We'll cover these functions as we get to them. Let's start looking at the layers of the model. Keras Layer A Keras layer is just like a neural network layer. 
It can be fully connected, max pool, activation, etc. You can add a layer to the model using the model's add() function. For example, a simple model would look like this: ```python from keras.models import Sequential from keras.layers.core import Dense, Activation, Flatten Create the Sequential model model = Sequential() 1st Layer - Add a flatten layer model.add(Flatten(input_shape=(32, 32, 3))) 2nd Layer - Add a fully connected layer model.add(Dense(100)) 3rd Layer - Add a ReLU activation layer model.add(Activation('relu')) 4th Layer - Add a fully connected layer model.add(Dense(60)) 5th Layer - Add a ReLU activation layer model.add(Activation('relu')) ``` Keras will automatically infer the shape of all layers after the first layer. This means you only have to set the input dimensions for the first layer. The first layer from above, model.add(Flatten(input_shape=(32, 32, 3))), sets the input dimension to (32, 32, 3) and output dimension to (3072=32*32*3). The second layer takes in the output of the first layer and sets the output dimenions to (100). This chain of passing output to the next layer continues until the last layer, which is the output of the model. Build a Multi-Layer Feedforward Network Build a multi-layer feedforward neural network to classify the traffic sign images. Set the first layer to a Flatten layer with the input_shape set to (32, 32, 3) Set the second layer to Dense layer width to 128 output. Use a ReLU activation function after the second layer. Set the output layer width to 43, since there are 43 classes in the dataset. Use a softmax activation function after the output layer. To get started, review the Keras documentation about models and layers. The Keras example of a Multi-Layer Perceptron network is similar to what you need to do here. Use that as a guide, but keep in mind that there are a number of differences. End of explanation """ # TODO: Compile and train the model here. 
# Configures the learning process and metrics # Compile the network using adam optimizer and categorical_crossentropy loss function. model.compile('adam', 'categorical_crossentropy', ['accuracy']) # Train the model # History is a record of training loss and metrics # Train the network for ten epochs and validate with 20% of the training data. history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=10, validation_split=0.2, verbose=2) # STOP: Do not change the tests below. Your implementation should pass these tests. from keras.optimizers import Adam assert model.loss == 'categorical_crossentropy', 'Not using categorical_crossentropy loss function' assert isinstance(model.optimizer, Adam), 'Not using adam optimizer' assert len(history.history['acc']) == 10, 'You\'re using {} epochs when you need to use 10 epochs.'.format(len(history.history['acc'])) assert history.history['acc'][-1] > 0.92, 'The training accuracy was: %.3f. It shoud be greater than 0.92' % history.history['acc'][-1] assert history.history['val_acc'][-1] > 0.85, 'The validation accuracy is: %.3f. It shoud be greater than 0.85' % history.history['val_acc'][-1] print('Tests passed.') """ Explanation: Training a Sequential Model You built a multi-layer neural network in Keras, now let's look at training a neural network. ```python from keras.models import Sequential from keras.layers.core import Dense, Activation model = Sequential() ... Configures the learning process and metrics model.compile('sgd', 'mean_squared_error', ['accuracy']) Train the model History is a record of training loss and metrics history = model.fit(X_train_data, Y_train_data, batch_size=128, nb_epoch=2, validation_split=0.2, verbose=2) Calculate test score test_score = model.evaluate(X_test_data, Y_test_data) `` The code above configures, trains, and tests the model. 
The linemodel.compile('sgd', 'mean_squared_error', ['accuracy'])configures the model's optimizer to'sgd'(stochastic gradient descent), the loss to'mean_squared_error', and the metric to'accuracy'`. You can find more optimizers here, loss functions here, and more metrics here. To train the model, use the fit() function as shown in model.fit(X_train_data, Y_train_data, batch_size=128, nb_epoch=2, validation_split=0.2, verbose=2). The validation_split parameter will split a percentage of the training dataset to be used to validate the model. Typically you won't have to change the verbose parameter but in Jupyter notebooks the update animation can crash the notebook so we set verbose=2, this limits the animation to only update after an epoch is complete. The model can be further tested with the test dataset using the evaluation() function as shown in the last line. Train the Network Compile the network using adam optimizer and categorical_crossentropy loss function. Train the network for ten epochs and validate with 20% of the training data. End of explanation """ from keras.models import Sequential from keras.layers.core import Dense, Activation, Flatten from keras.layers.convolutional import Convolution2D, Conv2D # TODO: Re-construct the network and add a convolutional layer before the flatten layer. model = Sequential() # TODO: Build a Multi-layer feedforward neural network with Keras here. 
# 1st Layer - Add a convolution layer model.add(Convolution2D(32, 3, 3, border_mode='valid',input_shape=(32, 32, 3))) #model.add(Conv2D(32, 3, 3, border_mode='valid',input_shape=(32, 32, 3))) # 2nd Layer - Add a ReLU activation layer model.add(Activation('relu')) # 3rd Layer - Add a flatten layer model.add(Flatten()) # 4th Layer - Add a fully connected layer model.add(Dense(128)) # 5th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 6th Layer - Add a fully connected layer model.add(Dense(43)) # 7th Layer - Add a softmax activation layer model.add(Activation('softmax')) # STOP: Do not change the tests below. Your implementation should pass these tests. from keras.layers.core import Dense, Activation, Flatten from keras.layers.convolutional import Convolution2D check_layers([Convolution2D, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers) assert model.layers[0].input_shape == (None, 32, 32, 3), 'First layer input shape is wrong, it should be (32, 32, 3)' assert model.layers[0].nb_filter == 32, 'Wrong number of filters, it should be 32' assert model.layers[0].nb_col == model.layers[0].nb_row == 3, 'Kernel size is wrong, it should be a 3x3' assert model.layers[0].border_mode == 'valid', 'Wrong padding, it should be valid' model.compile('adam', 'categorical_crossentropy', ['accuracy']) history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2, verbose=2) assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1] print('Tests passed.') """ Explanation: Convolutions Re-construct the previous network Add a convolutional layer with 32 filters, a 3x3 kernel, and valid padding before the flatten layer. Add a ReLU activation after the convolutional layer. Hint 1: The Keras example of a convolutional neural network for MNIST would be a good example to review. 
End of explanation """ from keras.models import Sequential from keras.layers.core import Dense, Activation, Flatten from keras.layers.convolutional import Convolution2D, Conv2D from keras.layers.pooling import MaxPooling2D # TODO: Re-construct the network and add a pooling layer after the convolutional layer. model = Sequential() # TODO: Build a Multi-layer feedforward neural network with Keras here. # 1st Layer - Add a convolution layer model.add(Convolution2D(32, 3, 3, border_mode='valid',input_shape=(32, 32, 3))) # 2nd Layer - Add a 2x2 max pooling layer model.add(MaxPooling2D(pool_size=(2, 2))) # 3rd Layer - Add a ReLU activation layer model.add(Activation('relu')) # 4th Layer - Add a flatten layer model.add(Flatten()) # 5th Layer - Add a fully connected layer model.add(Dense(128)) # 6th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 7th Layer - Add a fully connected layer model.add(Dense(43)) # 8th Layer - Add a softmax activation layer model.add(Activation('softmax')) # STOP: Do not change the tests below. Your implementation should pass these tests. from keras.layers.core import Dense, Activation, Flatten from keras.layers.convolutional import Convolution2D from keras.layers.pooling import MaxPooling2D check_layers([Convolution2D, MaxPooling2D, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers) assert model.layers[1].pool_size == (2, 2), 'Second layer must be a max pool layer with pool size of 2x2' model.compile('adam', 'categorical_crossentropy', ['accuracy']) history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2, verbose=2) assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1] print('Tests passed.') """ Explanation: Pooling Re-construct the network Add a 2x2 max pooling layer immediately following your convolutional layer. 
End of explanation """ from keras.layers.core import Dense, Activation, Flatten, Dropout from keras.layers.convolutional import Convolution2D from keras.layers.pooling import MaxPooling2D # TODO: Re-construct the network and add dropout after the pooling layer. model = Sequential() # TODO: Build a Multi-layer feedforward neural network with Keras here. # 1st Layer - Add a convolution layer model.add(Convolution2D(32, 3, 3, border_mode='valid',input_shape=(32, 32, 3))) # 2nd Layer - Add a 2x2 max pooling layer model.add(MaxPooling2D(pool_size=(2, 2))) # 3rd Layer - Add a dropout layer. Set the dropout rate to 50%. model.add(Dropout(0.5)) # 4th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 5th Layer - Add a flatten layer model.add(Flatten()) # 6th Layer - Add a fully connected layer model.add(Dense(128)) # 7th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 8th Layer - Add a fully connected layer model.add(Dense(43)) # 9th Layer - Add a softmax activation layer model.add(Activation('softmax')) # STOP: Do not change the tests below. Your implementation should pass these tests. from keras.layers.core import Dense, Activation, Flatten, Dropout from keras.layers.convolutional import Convolution2D from keras.layers.pooling import MaxPooling2D check_layers([Convolution2D, MaxPooling2D, Dropout, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers) assert model.layers[2].p == 0.5, 'Third layer should be a Dropout of 50%' model.compile('adam', 'categorical_crossentropy', ['accuracy']) history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2, verbose=2) assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1] print('Tests passed.') """ Explanation: Dropout Re-construct the network Add a dropout layer after the pooling layer. Set the dropout rate to 50%. 
End of explanation """ # TODO: Build a model from keras.layers.core import Dense, Activation, Flatten, Dropout from keras.layers.convolutional import Convolution2D from keras.layers.pooling import MaxPooling2D # TODO: Re-construct the network and add dropout after the pooling layer. model = Sequential() # TODO: Build a Multi-layer feedforward neural network with Keras here. # 1st Layer - Add a convolution layer model.add(Convolution2D(32, 3, 3, border_mode='valid',input_shape=(32, 32, 3))) # 2nd Layer - Add a 2x2 max pooling layer model.add(MaxPooling2D(pool_size=(2, 2))) # 3rd Layer - Add a dropout layer. Set the dropout rate to 50%. model.add(Dropout(0.5)) # 4th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 5th Layer - Add a flatten layer model.add(Flatten()) # 6th Layer - Add a fully connected layer model.add(Dense(128)) # 7th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 8th Layer - Add a fully connected layer model.add(Dense(43)) # 9th Layer - Add a softmax activation layer model.add(Activation('softmax')) # TODO: Compile and train the model model.compile('adam', 'categorical_crossentropy', ['accuracy']) history = model.fit(X_normalized, y_one_hot, batch_size=50, nb_epoch=20, validation_split=0.2, verbose=2) """ Explanation: Optimization Congratulations! You've built a neural network with convolutions, pooling, dropout, and fully-connected layers, all in just a few lines of code. Have fun with the model and see how well you can do! Add more layers, or regularization, or different padding, or batches, or more training epochs. What is the best validation accuracy you can achieve? batch_size=50, nb_epoch=20, border_mode='valid' End of explanation """ # TODO: Build a model from keras.layers.core import Dense, Activation, Flatten, Dropout from keras.layers.convolutional import Convolution2D from keras.layers.pooling import MaxPooling2D # TODO: Re-construct the network and add dropout after the pooling layer. 
model = Sequential() # TODO: Build a Multi-layer feedforward neural network with Keras here. # 1st Layer - Add a convolution layer model.add(Convolution2D(32, 3, 3, border_mode='valid',input_shape=(32, 32, 3))) # 2nd Layer - Add a 2x2 max pooling layer model.add(MaxPooling2D(pool_size=(2, 2))) # 3rd Layer - Add a dropout layer. Set the dropout rate to 50%. model.add(Dropout(0.5)) # 4th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 5th Layer - Add a flatten layer model.add(Flatten()) # 6th Layer - Add a fully connected layer model.add(Dense(128)) # 7th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 8th Layer - Add a fully connected layer model.add(Dense(43)) # 9th Layer - Add a softmax activation layer model.add(Activation('softmax')) # TODO: Compile and train the model model.compile('adam', 'categorical_crossentropy', ['accuracy']) history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=20, validation_split=0.2, verbose=2) """ Explanation: batch_size=128, nb_epoch=20, border_mode='valid' End of explanation """ # TODO: Build a model from keras.layers.core import Dense, Activation, Flatten, Dropout from keras.layers.convolutional import Convolution2D from keras.layers.pooling import MaxPooling2D # TODO: Re-construct the network and add dropout after the pooling layer. model = Sequential() # TODO: Build a Multi-layer feedforward neural network with Keras here. # 1st Layer - Add a convolution layer model.add(Convolution2D(32, 3, 3, border_mode='valid',input_shape=(32, 32, 3))) # 2nd Layer - Add a 2x2 max pooling layer model.add(MaxPooling2D(pool_size=(2, 2))) # 3rd Layer - Add a dropout layer. Set the dropout rate to 50%. 
model.add(Dropout(0.5)) # 4th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 1st Layer - Add a convolution layer model.add(Convolution2D(32, 3, 3, border_mode='valid')) # 2nd Layer - Add a 2x2 max pooling layer model.add(MaxPooling2D(pool_size=(2, 2))) # 3rd Layer - Add a dropout layer. Set the dropout rate to 50%. model.add(Dropout(0.5)) # 4th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 5th Layer - Add a flatten layer model.add(Flatten()) # 6th Layer - Add a fully connected layer model.add(Dense(128)) # 7th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 8th Layer - Add a fully connected layer model.add(Dense(43)) # 9th Layer - Add a softmax activation layer model.add(Activation('softmax')) # TODO: Compile and train the model model.compile('adam', 'categorical_crossentropy', ['accuracy']) history = model.fit(X_normalized, y_one_hot, batch_size=50, nb_epoch=10, validation_split=0.2, verbose=2) """ Explanation: Add one more convolution layer with dropout 50%, batch_size=50, nb_epoch=10 End of explanation """ # TODO: Build a model from keras.layers.core import Dense, Activation, Flatten, Dropout from keras.layers.convolutional import Convolution2D from keras.layers.pooling import MaxPooling2D # TODO: Re-construct the network and add dropout after the pooling layer. model = Sequential() # TODO: Build a Multi-layer feedforward neural network with Keras here. # 1st Layer - Add a convolution layer model.add(Convolution2D(32, 3, 3, border_mode='same',input_shape=(32, 32, 3))) # 2nd Layer - Add a 2x2 max pooling layer model.add(MaxPooling2D(pool_size=(2, 2))) # 3rd Layer - Add a dropout layer. Set the dropout rate to 50%. 
model.add(Dropout(0.5)) # 4th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 1st Layer - Add a convolution layer model.add(Convolution2D(32, 3, 3, border_mode='same')) # 2nd Layer - Add a 2x2 max pooling layer model.add(MaxPooling2D(pool_size=(2, 2))) # 3rd Layer - Add a dropout layer. Set the dropout rate to 50%. model.add(Dropout(0.5)) # 4th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 5th Layer - Add a flatten layer model.add(Flatten()) # 6th Layer - Add a fully connected layer model.add(Dense(128)) # 7th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 8th Layer - Add a fully connected layer model.add(Dense(43)) # 9th Layer - Add a softmax activation layer model.add(Activation('softmax')) # TODO: Compile and train the model model.compile('adam', 'categorical_crossentropy', ['accuracy']) history = model.fit(X_normalized, y_one_hot, batch_size=50, nb_epoch=20, validation_split=0.2, verbose=2) """ Explanation: Add one more convolution layer with dropout 50%, batch_size=50, nb_epoch=20, border_mode='same' End of explanation """ # TODO: Build a model from keras.layers.core import Dense, Activation, Flatten, Dropout from keras.layers.convolutional import Convolution2D from keras.layers.pooling import MaxPooling2D # TODO: Re-construct the network and add dropout after the pooling layer. model = Sequential() # TODO: Build a Multi-layer feedforward neural network with Keras here. # 1st Layer - Add a convolution layer model.add(Convolution2D(32, 3, 3, border_mode='valid',input_shape=(32, 32, 3))) # 2nd Layer - Add a 2x2 max pooling layer model.add(MaxPooling2D(pool_size=(2, 2))) # 3rd Layer - Add a dropout layer. Set the dropout rate to 50%. 
model.add(Dropout(0.5)) # 4th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 1st Layer - Add a convolution layer model.add(Convolution2D(32, 3, 3, border_mode='valid')) # 2nd Layer - Add a 2x2 max pooling layer model.add(MaxPooling2D(pool_size=(2, 2))) # 3rd Layer - Add a dropout layer. Set the dropout rate to 50%. model.add(Dropout(0.5)) # 4th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 5th Layer - Add a flatten layer model.add(Flatten()) # 6th Layer - Add a fully connected layer model.add(Dense(128)) # 7th Layer - Add a ReLU activation layer model.add(Activation('relu')) # 8th Layer - Add a fully connected layer model.add(Dense(43)) # 9th Layer - Add a softmax activation layer model.add(Activation('softmax')) # TODO: Compile and train the model model.compile('adam', 'categorical_crossentropy', ['accuracy']) history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=20, validation_split=0.2, verbose=2) """ Explanation: Add one more convolution layer with dropout 50%, batch_size=128, nb_epoch=20, border_mode='valid' End of explanation """ # TODO: Load test data with open('test.p', 'rb') as f: test_data = pickle.load(f) # TODO: Load the feature data to the variable X_train X_test = test_data['features'] # TODO: Load the label data to the variable y_train y_test = test_data['labels'] # TODO: Preprocess data & one-hot encode the labels X_normalized_test = normalize(X_test, a=-0.5, b=0.5) y_one_hot_test = lb.transform(y_test) # TODO: Evaluate model on test data metrics = model.evaluate(X_normalized_test, y_one_hot_test) for metric_i in range(len(model.metrics_names)): metric_name = model.metrics_names[metric_i] metric_value = metrics[metric_i] print('{}: {}'.format(metric_name, metric_value)) """ Explanation: Best Validation Accuracy: 0.9897 Testing Once you've picked out your best model, it's time to test it. Load up the test data and use the evaluate() method to see how well it does. 
Hint 1: The evaluate() method should return an array of numbers. Use the metrics_names property to get the labels. End of explanation """
tritemio/PyBroMo
notebooks/PyBroMo - 4. Two-state dynamics - Static smFRET simulation.ipynb
gpl-2.0
%matplotlib inline from pathlib import Path import numpy as np import tables import matplotlib.pyplot as plt import seaborn as sns import pybromo as pbm import phconvert as phc print('Numpy version:', np.__version__) print('PyTables version:', tables.__version__) print('PyBroMo version:', pbm.__version__) print('phconvert version:', phc.__version__) SIM_PATH = 'data/' """ Explanation: PyBroMo 4. Two-state dynamics - Static smFRET simulation <small><i> This notebook is part of <a href="http://tritemio.github.io/PyBroMo" target="_blank">PyBroMo</a> a python-based single-molecule Brownian motion diffusion simulator that simulates confocal smFRET experiments. </i></small> Overview In this notebook we generate single-polulation static smFRET data files from the same diffusion trajectories. These files are needed by the next notebook to generate smFRET data with 2-state dynamics. Loading the software Import all the relevant libraries: End of explanation """ Epr1 = 0.75 Epr2 = 0.4 """ Explanation: Define populations We assume a $\gamma = 0.7$ and two populations, one with $E_{PR}=0.75$ and the other $E_{PR}=0.4$ End of explanation """ gamma = 0.7 E1 = Epr1 /(Epr1 * (1 - gamma) + gamma) E2 = Epr2 /(Epr2 * (1 - gamma) + gamma) E1, E2 """ Explanation: The corrected $E$ for the two populations are: End of explanation """ Λ1 = 200e3 # kcps, the detected (i.e. uncorrected) peak emission rate for population 1 Λγ = Λ1 * Epr1 / E1 Λ2 = Λγ * E2 / Epr2 Λ2 Λ2 = np.round(Λ2, -3) Λ2 """ Explanation: The simulation takes the uncorrected $E_{PR}$ as input. We want to simulate a second population that has measured brightness scaling as if the difference was only due to the $\gamma$ factor. 
Using the definitions $\Lambda$ and $\Lambda_\gamma$ from (Ingargiola 2017), we can use the relation: $$\frac{E}{E_{PR}} = \frac{\Lambda}{\Lambda_\gamma}$$ Solving for $\Lambda_\gamma$ or $\Lambda$ we get: $$ \Lambda_\gamma = \Lambda\frac{E_{PR}}{E}$$ $${\Lambda} = {\Lambda_\gamma}\frac{E}{E_{PR}} $$ Since $\Lambda_\gamma$ is gamma corrected does not depend on $E$. We can compute it from the parameters of population 1 and then use it for finding $\Lambda$ for population 2: End of explanation """ S = pbm.ParticlesSimulation.from_datafile('0eb9', mode='a', path=SIM_PATH) S.particles.diffusion_coeff_counts #S = pbm.ParticlesSimulation.from_datafile('44dc', mode='a', path=SIM_PATH) """ Explanation: Create smFRET data-files Create a file for storing timestamps Here we load a diffusion simulation opening a file to save timstamps in write mode. Use 'a' (i.e. append) to keep previously simulated timestamps for the given diffusion. End of explanation """ params1 = dict( em_rates = (Λ1,), # Peak emission rates (cps) for each population (D+A) E_values = (Epr1,), # FRET efficiency for each population num_particles = (35,), # Number of particles in each population bg_rate_d = 900, # Poisson background rate (cps) Donor channel bg_rate_a = 600, # Poisson background rate (cps) Acceptor channel ) """ Explanation: Simulate timestamps of smFRET We want to simulate two separate smFRET files representing two static populations. 
We start definint the simulation parameters for population 1 with the following syntax: End of explanation """ params2 = dict( em_rates = (Λ2,), # Peak emission rates (cps) for each population (D+A) E_values = (Epr2,), # FRET efficiency for each population num_particles = (35,), # Number of particles in each population bg_rate_d = 900, # Poisson background rate (cps) Donor channel bg_rate_a = 600, # Poisson background rate (cps) Acceptor channel ) """ Explanation: We can now define population 2: End of explanation """ params_mix = dict( em_rates = (Λ1, Λ2), # Peak emission rates (cps) for each population (D+A) E_values = (Epr1, Epr2), # FRET efficiency for each population num_particles = (20, 15), # Number of particles in each population bg_rate_d = 900, # Poisson background rate (cps) Donor channel bg_rate_a = 600, # Poisson background rate (cps) Acceptor channel ) """ Explanation: Finally, we also define a static mixture of the two populations: End of explanation """ mix_sim = pbm.TimestapSimulation(S, **params1) mix_sim.summarize() """ Explanation: Simulate static population 1 Population 1: Create the object that will run the simulation and print a summary: End of explanation """ rs = np.random.RandomState(1234) mix_sim.run(rs=rs, overwrite=False, skip_existing=True) """ Explanation: Run the simualtion: End of explanation """ mix_sim.save_photon_hdf5(identity=dict(author='Antonino Ingargiola', author_affiliation='UCLA')) """ Explanation: Save simulation to a smFRET Photon-HDF5 file: End of explanation """ mix_sim = pbm.TimestapSimulation(S, **params2) mix_sim.summarize() rs = np.random.RandomState(1234) mix_sim.run(rs=rs, overwrite=False, skip_existing=True) mix_sim.save_photon_hdf5(identity=dict(author='Antonino Ingargiola', author_affiliation='UCLA')) """ Explanation: Simulate static population 2 Population 2: Create the object that will run the simulation and print a summary: End of explanation """ mix_sim = pbm.TimestapSimulation(S, **params_mix) 
mix_sim.summarize() rs = np.random.RandomState(1234) mix_sim.run(rs=rs, overwrite=False, skip_existing=True) mix_sim.save_photon_hdf5(identity=dict(author='Antonino Ingargiola', author_affiliation='UCLA')) !rsync -av --exclude 'pybromo_*.hdf5' /mnt/archive/Antonio/pybromo /mnt/wAntonio/dd """ Explanation: Simulate static mixture Static mixture: Create the object that will run the simulation and print a summary: End of explanation """ import fretbursts as fb filepath = list(Path(SIM_PATH).glob('smFRET_*')) filepath d = fb.loader.photon_hdf5(str(filepath[0])) d d.A_em fb.dplot(d, fb.timetrace); d.calc_bg(fun=fb.bg.exp_fit, tail_min_us='auto', F_bg=1.7, time_s=5) fb.dplot(d, fb.timetrace_bg) d.burst_search(F=7) d.num_bursts ds = d.select_bursts(fb.select_bursts.size, th1=20) ds.num_bursts with plt.rc_context({#'font.size': 10, #'savefig.dpi': 200, 'figure.dpi': 150}): for i in range(3): fig, ax = plt.subplots(figsize=(100, 3)) fb.dplot(d, fb.timetrace, binwidth=0.5e-3, tmin=i*10, tmax=(i+1)*10, bursts=True, plot_style=dict(lw=1), ax=ax); ax.set_xlim(i*10, (i+1)*10); display(fig) plt.close(fig) fb.dplot(ds, fb.hist_fret, pdf=False) plt.axvline(0.4, color='k', ls='--'); fb.bext.burst_data(ds) fb.dplot(d, fb.hist_size) fb.dplot(d, fb.hist_width) """ Explanation: Burst analysis The generated Photon-HDF5 files can be analyzed by any smFRET burst analysis program. Here we show an example using the opensource FRETBursts program: End of explanation """
satishgoda/learning
web/coffeescript/coffeescript.ipynb
mit
from IPython.core.display import HTML, Javascript from IPython.core import display """ Explanation: About http://coffeescript.org https://en.wikipedia.org/wiki/Jeremy_Ashkenas https://en.wikipedia.org/wiki/CoffeeScript End of explanation """ !ls *b !coffee -v """ Explanation: Installed coffee script using npm Following is the directory structure that we are going to follow for this tutorial. End of explanation """ !coffee -h """ Explanation: Help End of explanation """ expr = "square = (x) -> x*x" !coffee -c -e "$expr" !coffee -b -c -e "$expr" """ Explanation: Hello World (Expression) End of explanation """ src = "tutorial/src" lib = "tutorial/jslib" %%writefile "$src/cs0.coffee" square = (x) -> x * x !coffee --no-header -b -p -c "$src/cs0.coffee" !coffee -o "$lib" -c "$src/cs0.coffee" !cat "$lib/cs0.js" !ls * -R """ Explanation: Hello World (Script) End of explanation """ %%writefile "$src/cs1.coffee" fill = (container, liquid="water") -> "Filling the #{container} with #{liquid}" !coffee -b -o "$lib" -c "$src/cs1.coffee" #!cat "$lib/cs1.js" """ Explanation: Function Default Arguments End of explanation """ %%javascript //# %load "tutorial/lib/cs1.js" // Generated by CoffeeScript 1.12.3 var fill; fill = function(container, liquid) { if (liquid == null) { liquid = "water"; } return "Filling the " + container + " with " + liquid; }; alert(fill("Cups")); """ Explanation: Failed experiment First load the java script file and then add the javascript cell magic. End of explanation """ %%writefile "$src/cs2.coffee" song = ['do', 're', 'mi', 'fa', 'so'] singers = { Jagger: "Rock", Elvis: "Roll"} bitlist = [ 1, 0, 1 0, 0, 1 1, 1, 0 ] kids = brother: name: "max", age: 11 sister: name: "Ida" age: 9 !coffee -b --no-header -o "$lib" -c "$src/cs2.coffee" !cat "$lib/cs2.js" """ Explanation: What I have observed is that the load magic command substitutes the variable expansion!! Is there a way to not let this happen? 
Data Types End of explanation """ %%writefile "$src/cs3.coffee" $('.account').attr class: 'active' !coffee -b --no-header -o "$lib" -c "$src/cs3.coffee" !cat "$lib/cs3.js" """ Explanation: On Reserved Words End of explanation """ %%writefile "$src/cs4.coffee" name = "Jim Carrey" mask = "The Mask" weapon = "Laughter" superhero = {name, mask, weapon} output = "#{turtle.name} wears an #{turtle.mask} as a mask. Watch out for his #{turtle.weapon}!" !coffee -b -o "$lib" -c "$src/cs4.coffee" !cat "$lib/cs4.js" """ Explanation: Creating Objects End of explanation """ %%writefile "$src/cs5.coffee" outer = 1 changeNumbers = -> inner = -1 outer = 10 inner = changeNumbers() !coffee -b -o "$lib" -c "$src/cs5.coffee" !cat "$lib/cs5.js" """ Explanation: Lexical Scoping and Variable Safety End of explanation """ #%%javascript %load "$lib/cs1.js" """ Explanation: TODO Read more about the variable scoping and variable safety. https://en.wikipedia.org/wiki/CoffeeScript https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Strict_mode End of explanation """
kinshuk4/MoocX
misc/deep_learning_notes/Ch2 Intro to Tensorflow/007 - tensorflow API exploration.ipynb
mit
import tensorflow as tf from pprint import pprint """ Explanation: Here we go through the API doc for tensorflow, and test various senarious out to get a better understanding of the mechanics of tensorflow End of explanation """ c = tf.constant(4.0) assert c.graph is tf.get_default_graph() g = tf.Graph() with g.as_default(): # Define operations and tensors in `g` c = tf.constant(30.0) assert c.graph is g # now if we go g.finalize, no new operation could be added afterward. g.finalize() print(g.finalized) # uncomment this line, and it should give you an error! # d = tf.constant(10) g = tf.Graph() with g.as_default(): a = tf.constant([[2]]) b = tf.constant([[1]]) with g.control_dependencies([a, b]): c = tf.matmul(a, b) with tf.Session() as sess: result = sess.run(c) print(result) g = tf.Graph() with g.device('/gpu:0'): a = tf.constant([[2]]) with tf.Session() as sess: print(sess.run(a)) """ Explanation: Graph End of explanation """ x = tf.constant(1) print(x.op.name) with tf.Graph().as_default() as g: y = tf.constant(1, name='y') assert y.op.name == 'y', y.op.name with g.name_scope('some_scope'): a = tf.constant(2, name='a') assert a.op.name == 'some_scope/a', a.op.name """ Explanation: another example ```python with g.device('/gpu:0'): # All operations constructed in this context will be placed # on GPU 0. with g.device(None): # All operations constructed in this context will have no # assigned device. Defines a function from Operation to device string. def matmul_on_gpu(n): if n.type == "MatMul": return "/gpu:0" else: return "/cpu:0" with g.device(matmul_on_gpu): # All operations of type "MatMul" constructed in this context # will be placed on GPU 0; all other operations will be placed # on CPU 0. 
``` constants and variables End of explanation """ with tf.Graph().as_default() as g: c = tf.constant(5.0, name="c") assert c.op.name == "c" c_1 = tf.constant(6.0, name="c") assert c_1.op.name == "c_1" # Creates a scope called "nested" with g.name_scope("nested") as scope: nested_c = tf.constant(10.0, name="c") assert nested_c.op.name == "nested/c" # Creates a nested scope called "inner". with g.name_scope("inner"): nested_inner_c = tf.constant(20.0, name="c") assert nested_inner_c.op.name == "nested/inner/c" # Create a nested scope called "inner_1". with g.name_scope("inner"): nested_inner_1_c = tf.constant(30.0, name="c") assert nested_inner_1_c.op.name == "nested/inner_1/c" # Treats `scope` as an absolute name scope, and # switches to the "nested/" scope. with g.name_scope(scope): nested_d = tf.constant(40.0, name="d") assert nested_d.op.name == "nested/d" with g.name_scope(""): e = tf.constant(50.0, name="e") assert e.op.name == "e" with tf.Graph().as_default() as g: c = tf.constant(5.0, name='c') d = tf.constant(4, name='d') g.add_to_collection('samosa', c) g.add_to_collection('samosa', c) g.add_to_collection('samosa', c) pprint(g.get_collection('samosa')) # the list_ref returns the original list, which # can be mutated inplace to add or remove tensors. list_ref = g.get_collection_ref('samosa') list_ref.append(d) pprint(g.get_collection('samosa')) """ Explanation: A full example End of explanation """
marcus-nystrom/python_course
Week4_lecture.ipynb
gpl-3.0
import numpy as np import matplotlib.pyplot as plt # A first attempt (we ignore the target for now) image_size = (1280, 1024) # Size of background in pixels nDistractors = 10 # Number of distractors distractor_size = 500 # Generate positions where to put the distractors xr = np.random.randint(0, image_size[0], nDistractors) yr = np.random.randint(0, image_size[1], nDistractors) plt.scatter(xr, yr, s=distractor_size ,c='b',marker='v') plt.axis([0, image_size[0], 0, image_size[1]]) plt.show() """ Explanation: Lecture notes from the fourth week¶ Programming for the Behavioral Sciences A large part of running behavioural experiments concerns the preparation of stimuli, i.e., what you have your participants looking at. The goal of this week is to create stimuli for a visual search experiment where participants search for a target object among distractors (non-targets that distract you from finding the target). We want to create a stimulus image where we flexibly can control the background color of the image as well as the the color, shape, and size of the target and distractors. An example stimuli is shown here; the red triangle is the target and the blue dots are the distractors: <img src="img\stimulus.png" alt="Stimulus" style="width:304px;height:228px;"> This week, we will use Matplotlib to generate the images. Next week PsychoPy will be used to accomplish the same task. The rest of the lectures in this course will be devoted to implement central parts of the experimental process in a visual search experiment: create stimuli, record data, and plot and analyze data. Introduction to this week's exercise So what do we need to know before we can start building the stimuli? 
Information about the background (size, color) Information about the target (position, shape, color) Information about the distractors (positions, shape, color) End of explanation """ # Divide the plot into a 10 x 8 grid, and allow only one distractor in each grid image_size = [1280, 1024] grid_size = [10, 8] grid_size_pixels_x = image_size[0] / grid_size[0] grid_size_pixels_y = image_size[1] / grid_size[1] x_c = np.arange(grid_size_pixels_x / 2.0, image_size[0], grid_size_pixels_x) y_c = np.arange(grid_size_pixels_y / 2.0, image_size[1], grid_size_pixels_y) # Plot the positions of the new grid xx = np.ones(len(x_c)) yy = np.ones(len(y_c)) plt.plot(x_c, xx, 'ro') plt.plot(yy, y_c, 'bo') # plt.axis([0, image_size[0], 0, image_size[1]]) plt.show() """ Explanation: Two problem are visible * The distractors overlap * Parts of a distractor can be outside of the plot One way to solve this is ensure that the distractors are always separated by a large enough distance to other distractors and to the image border. End of explanation """ # Meshgrid creats the whole grid (you could also use a double for-) x_all, y_all = np.meshgrid(x_c, y_c) # Reshape the positions into a N x 2 array (N rows, 2 columns), to make it easier to work with later xy_all = np.vstack((x_all.flatten(), y_all.flatten())).T # Plot all grid elements plt.figure() plt.plot(xy_all[:, 0], xy_all[:, 1], 'g+') plt.show() """ Explanation: New problem. Seems like only the x-, and y-, coordinates of the grid elements were defined, but not the locations for ALL grid elements. How can this be done? End of explanation """ import time # Used to animate below nSelect = 10 # Randomly change the positions of the locations in the array np.random.shuffle(xy_all) # Plot the result (looks much better!) plt.scatter(xy_all[:nSelect, 0], xy_all[:nSelect, 1], s=distractor_size ,c='r',marker='v') plt.axis([0, image_size[0], 0, image_size[1]]) plt.show() """ Explanation: Now we know where distractors can be placed. 
But we don't want to put a distractor at each grid position, but draw a number of them (say 10) at random. One way to do this is the 'shuffle' the array, and then select the 10 first elements. End of explanation """ # Example of how dictionaries are defined... d1 = {'key1': 4, 'key2': 'my_value2'} #... and how the values are accessed from them print(d1['key2']) # Unlike lists and arrays, variables in dictionaries are not ordered, so you can't do, e.g., # print(d1[0]) """ Explanation: Dictionaries In the assigment, dictionaries will be used as containers of information about the background, target, and distractors. A dictionary is just like it sounds; given a key (-word), it returns whatever is behind the door the key opens (a number, string, or any other python object). End of explanation """ # Specify the size and color of the background. Use a dictionary background = {'size':np.array([1280, 1024]),'color':0.5} # zero - black, 1 - white # Specify the target target = {'shape':'^', 'size':10, 'color':'r', 'face_color':'r'} # Specify the distractors distractor = {'shape':'o', 'size':10, 'color':'b', 'number_of':10} # Test prints print(background['color'], distractor['size']) """ Explanation: In this assignment, the dictionaries contain information about the visual search images End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/csir-csiro/cmip6/models/sandbox-2/seaice.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'csir-csiro', 'sandbox-2', 'seaice') """ Explanation: ES-DOC CMIP6 Model Properties - Seaice MIP Era: CMIP6 Institute: CSIR-CSIRO Source ID: SANDBOX-2 Topic: Seaice Sub-Topics: Dynamics, Thermodynamics, Radiative Processes. Properties: 80 (63 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:53:54 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.model.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties --&gt; Model 2. Key Properties --&gt; Variables 3. Key Properties --&gt; Seawater Properties 4. Key Properties --&gt; Resolution 5. Key Properties --&gt; Tuning Applied 6. Key Properties --&gt; Key Parameter Values 7. Key Properties --&gt; Assumptions 8. Key Properties --&gt; Conservation 9. Grid --&gt; Discretisation --&gt; Horizontal 10. Grid --&gt; Discretisation --&gt; Vertical 11. Grid --&gt; Seaice Categories 12. Grid --&gt; Snow On Seaice 13. Dynamics 14. Thermodynamics --&gt; Energy 15. Thermodynamics --&gt; Mass 16. Thermodynamics --&gt; Salt 17. Thermodynamics --&gt; Salt --&gt; Mass Transport 18. 
Thermodynamics --&gt; Salt --&gt; Thermodynamics 19. Thermodynamics --&gt; Ice Thickness Distribution 20. Thermodynamics --&gt; Ice Floe Size Distribution 21. Thermodynamics --&gt; Melt Ponds 22. Thermodynamics --&gt; Snow Processes 23. Radiative Processes 1. Key Properties --&gt; Model Name of seaice model used. 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of sea ice model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.model.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.variables.prognostic') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sea ice temperature" # "Sea ice concentration" # "Sea ice thickness" # "Sea ice volume per grid cell area" # "Sea ice u-velocity" # "Sea ice v-velocity" # "Sea ice enthalpy" # "Internal ice stress" # "Salinity" # "Snow temperature" # "Snow depth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Variables List of prognostic variable in the sea ice model. 2.1. Prognostic Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of prognostic variables in the sea ice component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "TEOS-10" # "Constant" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. 
Key Properties --&gt; Seawater Properties Properties of seawater relevant to sea ice 3.1. Ocean Freezing Point Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Ocean Freezing Point Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant seawater freezing point, specify this value. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Resolution Resolution of the sea ice grid 4.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Canonical Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. 
Number Of Horizontal Gridpoints Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Tuning Applied Tuning applied to sea ice model component 5.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Target Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Simulations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 *Which simulations had tuning applied, e.g. all, not historical, only pi-control? * End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.4. Metrics Used Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List any observed metrics used in tuning model/parameters End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.5. Variables Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Which variables were changed during the tuning process? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Ice strength (P*) in units of N m{-2}" # "Snow conductivity (ks) in units of W m{-1} K{-1} " # "Minimum thickness of ice created in leads (h0) in units of m" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Key Parameter Values Values of key parameters 6.1. Typical Parameters Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N What values were specificed for the following parameters if used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Additional Parameters Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If you have any additional paramterised values that you have used (e.g. 
minimum open water fraction or bare ice albedo), please provide them here as a comma separated list End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.description') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Assumptions Assumptions made in the sea ice model 7.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General overview description of any key assumptions made in this model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. On Diagnostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Note any assumptions that specifically affect the CMIP6 diagnostic sea ice variables. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Missing Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Key Properties --&gt; Conservation Conservation in the sea ice component 8.1. 
Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Provide a general description of conservation methodology. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.properties') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Energy" # "Mass" # "Salt" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Properties Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Properties conserved in sea ice by the numerical schemes. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.budget') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Budget Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 For each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3 End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.4. Was Flux Correction Used Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does conservation involved flux correction? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. 
Corrected Conserved Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List any variables which are conserved by more than the numerical scheme alone. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Ocean grid" # "Atmosphere Grid" # "Own Grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9. Grid --&gt; Discretisation --&gt; Horizontal Sea ice discretisation in the horizontal 9.1. Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Grid on which sea ice is horizontal discretised? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Structured grid" # "Unstructured grid" # "Adaptive grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9.2. Grid Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the type of sea ice grid? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Finite differences" # "Finite elements" # "Finite volumes" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9.3. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the advection scheme? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 9.4. 
Thermodynamics Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the time step in the sea ice model thermodynamic component in seconds. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 9.5. Dynamics Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the time step in the sea ice model dynamic component in seconds. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.6. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional horizontal discretisation details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Zero-layer" # "Two-layers" # "Multi-layers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Grid --&gt; Discretisation --&gt; Vertical Sea ice vertical properties 10.1. Layering Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N What type of sea ice vertical layers are implemented for purposes of thermodynamic calculations? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 10.2. 
Number Of Layers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using multi-layers specify how many. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional vertical grid details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 11. Grid --&gt; Seaice Categories What method is used to represent sea ice categories ? 11.1. Has Mulitple Categories Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Set to true if the sea ice model has multiple sea ice categories. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.2. Number Of Categories Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using sea ice categories specify how many. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.3. Category Limits Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using sea ice categories specify each of the category limits. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.4. Ice Thickness Distribution Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the sea ice thickness distribution scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.other') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.5. Other Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If the sea ice model does not use sea ice categories specify any additional details. For example models that paramterise the ice thickness distribution ITD (i.e there is no explicit ITD) but there is assumed distribution and fluxes are computed accordingly. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12. Grid --&gt; Snow On Seaice Snow on sea ice details 12.1. Has Snow On Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is snow on ice represented in this model? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 12.2. Number Of Snow Levels Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of vertical levels of snow on ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.3. Snow Fraction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how the snow fraction on sea ice is determined End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.4. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional details related to snow on ice. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.horizontal_transport') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Incremental Re-mapping" # "Prather" # "Eulerian" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Dynamics Sea Ice Dynamics 13.1. Horizontal Transport Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of horizontal advection of sea ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Incremental Re-mapping" # "Prather" # "Eulerian" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. Transport In Thickness Space Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of sea ice transport in thickness space (i.e. in thickness categories)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Hibler 1979" # "Rothrock 1975" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.3. Ice Strength Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Which method of sea ice strength formulation is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.redistribution') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Rafting" # "Ridging" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.4. Redistribution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Which processes can redistribute sea ice (including thickness)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.rheology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Free-drift" # "Mohr-Coloumb" # "Visco-plastic" # "Elastic-visco-plastic" # "Elastic-anisotropic-plastic" # "Granular" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.5. Rheology Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Rheology, what is the ice deformation formulation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pure ice latent heat (Semtner 0-layer)" # "Pure ice latent and sensible heat" # "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)" # "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. 
Thermodynamics --&gt; Energy Processes related to energy in sea ice thermodynamics 14.1. Enthalpy Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the energy formulation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pure ice" # "Saline ice" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.2. Thermal Conductivity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What type of thermal conductivity is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Conduction fluxes" # "Conduction and radiation heat fluxes" # "Conduction, radiation and latent heat transport" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.3. Heat Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of heat diffusion? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Heat Reservoir" # "Thermal Fixed Salinity" # "Thermal Varying Salinity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.4. Basal Heat Flux Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method by which basal ocean heat flux is handled? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.5. 
Fixed Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.6. Heat Content Of Precipitation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method by which the heat content of precipitation is handled. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.7. Precipitation Effects On Salinity Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Thermodynamics --&gt; Mass Processes related to mass in sea ice thermodynamics 15.1. New Ice Formation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method by which new sea ice is formed in open water. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. 
Ice Vertical Growth And Melt Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method that governs the vertical growth and melt of sea ice. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Floe-size dependent (Bitz et al 2001)" # "Virtual thin ice melting (for single-category)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.3. Ice Lateral Melting Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of sea ice lateral melting? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.4. Ice Surface Sublimation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method that governs sea ice surface sublimation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.5. Frazil Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method of frazil ice formation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 16. Thermodynamics --&gt; Salt Processes related to salt in sea ice thermodynamics. 16.1. 
Has Multiple Sea Ice Salinities Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 16.2. Sea Ice Salinity Thermal Impacts Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does sea ice salinity impact the thermal properties of sea ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Prescribed salinity profile" # "Prognostic salinity profile" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Thermodynamics --&gt; Salt --&gt; Mass Transport Mass transport of salt 17.1. Salinity Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is salinity determined in the mass transport of salt calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 17.2. Constant Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant salinity value specify this value in PSU? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the salinity profile used. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Prescribed salinity profile" # "Prognostic salinity profile" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Thermodynamics --&gt; Salt --&gt; Thermodynamics Salt thermodynamics 18.1. Salinity Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is salinity determined in the thermodynamic calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 18.2. Constant Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant salinity value specify this value in PSU? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the salinity profile used. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Virtual (enhancement of thermal conductivity, thin ice melting)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Thermodynamics --&gt; Ice Thickness Distribution Ice thickness distribution details. 19.1. Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is the sea ice thickness distribution represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Parameterised" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Thermodynamics --&gt; Ice Floe Size Distribution Ice floe-size distribution details. 20.1. Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is the sea ice floe-size represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.2. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Please provide further details on any parameterisation of floe-size. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 21. Thermodynamics --&gt; Melt Ponds Characteristics of melt ponds. 21.1. 
Are Included Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are melt ponds included in the sea ice model? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Flocco and Feltham (2010)" # "Level-ice melt ponds" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21.2. Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What method of melt pond formulation is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Albedo" # "Freshwater" # "Heat" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21.3. Impacts Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N What do melt ponds have an impact on? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') # PROPERTY VALUE(S): # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22. Thermodynamics --&gt; Snow Processes Thermodynamic processes in snow on sea ice 22.1. Has Snow Aging Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Set to True if the sea ice model has a snow aging scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.2. Snow Aging Scheme Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the snow aging scheme. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22.3. Has Snow Ice Formation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Set to True if the sea ice model has snow ice formation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.4. Snow Ice Formation Scheme Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the snow ice formation scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.5. Redistribution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the impact of ridging on snow cover? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Single-layered heat diffusion" # "Multi-layered heat diffusion" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.6. Heat Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the heat diffusion through snow methodology in sea ice thermodynamics? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Delta-Eddington" # "Parameterized" # "Multi-band albedo" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiative Processes Sea Ice Radiative Processes 23.1. Surface Albedo Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method used to handle surface albedo. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Delta-Eddington" # "Exponential attenuation" # "Ice radiation transmission per category" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23.2. Ice Radiation Transmission Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method by which solar radiation through sea ice is handled. End of explanation """
tensorflow/docs-l10n
site/en-snapshot/tensorboard/graphs.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2019 The TensorFlow Authors. End of explanation """ # Load the TensorBoard notebook extension. %load_ext tensorboard from datetime import datetime from packaging import version import tensorflow as tf from tensorflow import keras print("TensorFlow version: ", tf.__version__) assert version.parse(tf.__version__).release[0] >= 2, \ "This notebook requires TensorFlow 2.0 or above." import tensorboard tensorboard.__version__ # Clear any logs from previous runs !rm -rf ./logs/ """ Explanation: Examining the TensorFlow Graph <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tensorboard/graphs"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorboard/blob/master/docs/graphs.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/tensorboard/blob/master/docs/graphs.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/tensorboard/docs/graphs.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> Overview TensorBoard’s Graphs dashboard is a 
powerful tool for examining your TensorFlow model. You can quickly view a conceptual graph of your model’s structure and ensure it matches your intended design. You can also view a op-level graph to understand how TensorFlow understands your program. Examining the op-level graph can give you insight as to how to change your model. For example, you can redesign your model if training is progressing slower than expected. This tutorial presents a quick overview of how to generate graph diagnostic data and visualize it in TensorBoard’s Graphs dashboard. You’ll define and train a simple Keras Sequential model for the Fashion-MNIST dataset and learn how to log and examine your model graphs. You will also use a tracing API to generate graph data for functions created using the new tf.function annotation. Setup End of explanation """ # Define the model. model = keras.models.Sequential([ keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(32, activation='relu'), keras.layers.Dropout(0.2), keras.layers.Dense(10, activation='softmax') ]) model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) """ Explanation: Define a Keras model In this example, the classifier is a simple four-layer Sequential model. End of explanation """ (train_images, train_labels), _ = keras.datasets.fashion_mnist.load_data() train_images = train_images / 255.0 """ Explanation: Download and prepare the training data. End of explanation """ # Define the Keras TensorBoard callback. logdir="logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S") tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir) # Train the model. model.fit( train_images, train_labels, batch_size=64, epochs=5, callbacks=[tensorboard_callback]) """ Explanation: Train the model and log data Before training, define the Keras TensorBoard callback, specifying the log directory. 
By passing this callback to Model.fit(), you ensure that graph data is logged for visualization in TensorBoard. End of explanation """ %tensorboard --logdir logs """ Explanation: Op-level graph Start TensorBoard and wait a few seconds for the UI to load. Select the Graphs dashboard by tapping “Graphs” at the top. End of explanation """ !tensorboard dev upload \ --logdir logs \ --name "Sample op-level graph" \ --one_shot """ Explanation: You can also optionally use TensorBoard.dev to create a hosted, shareable experiment. End of explanation """ # The function to be traced. @tf.function def my_func(x, y): # A simple hand-rolled layer. return tf.nn.relu(tf.matmul(x, y)) # Set up logging. stamp = datetime.now().strftime("%Y%m%d-%H%M%S") logdir = 'logs/func/%s' % stamp writer = tf.summary.create_file_writer(logdir) # Sample data for your function. x = tf.random.uniform((3, 3)) y = tf.random.uniform((3, 3)) # Bracket the function call with # tf.summary.trace_on() and tf.summary.trace_export(). tf.summary.trace_on(graph=True, profiler=True) # Call only one tf.function when tracing. z = my_func(x, y) with writer.as_default(): tf.summary.trace_export( name="my_func_trace", step=0, profiler_outdir=logdir) %tensorboard --logdir logs/func """ Explanation: By default, TensorBoard displays the op-level graph. (On the left, you can see the “Default” tag selected.) Note that the graph is inverted; data flows from bottom to top, so it’s upside down compared to the code. However, you can see that the graph closely matches the Keras model definition, with extra edges to other computation nodes. Graphs are often very large, so you can manipulate the graph visualization: Scroll to zoom in and out Drag to pan Double clicking toggles node expansion (a node can be a container for other nodes) You can also see metadata by clicking on a node. This allows you to see inputs, outputs, shapes and other details. 
<!-- <img class="tfo-display-only-on-site" src="https://github.com/tensorflow/tensorboard/blob/master/docs/images/graphs_computation.png?raw=1"/> --> <!-- <img class="tfo-display-only-on-site" src="https://github.com/tensorflow/tensorboard/blob/master/docs/images/graphs_computation_detail.png?raw=1"/> --> Conceptual graph In addition to the execution graph, TensorBoard also displays a conceptual graph. This is a view of just the Keras model. This may be useful if you’re reusing a saved model and you want to examine or validate its structure. To see the conceptual graph, select the “keras” tag. For this example, you’ll see a collapsed Sequential node. Double-click the node to see the model’s structure: <!-- <img class="tfo-display-only-on-site" src="https://github.com/tensorflow/tensorboard/blob/master/docs/images/graphs_tag_selection.png?raw=1"/> --> <!-- <img class="tfo-display-only-on-site" src="https://github.com/tensorflow/tensorboard/blob/master/docs/images/graphs_conceptual.png?raw=1"/> --> Graphs of tf.functions The examples so far have described graphs of Keras models, where the graphs have been created by defining Keras layers and calling Model.fit(). You may encounter a situation where you need to use the tf.function annotation to "autograph", i.e., transform, a Python computation function into a high-performance TensorFlow graph. For these situations, you use TensorFlow Summary Trace API to log autographed functions for visualization in TensorBoard. To use the Summary Trace API: Define and annotate a function with tf.function Use tf.summary.trace_on() immediately before your function call site. Add profile information (memory, CPU time) to graph by passing profiler=True With a Summary file writer, call tf.summary.trace_export() to save the log data You can then use TensorBoard to see how your function behaves. <br/> End of explanation """
ultiyuan/test0
lessons/yuan coursework.ipynb
gpl-2.0
def c_p(gamma):
    """Panel-method pressure coefficient from the panel vortex strength gamma
    (unit free-stream speed): c_p = 1 - gamma**2."""
    return 1 - gamma ** 2


def C_P(theta):
    """Exact potential-flow pressure coefficient on a circular cylinder at
    polar angle theta (radians): C_p = 1 - 4*sin(theta)**2."""
    s = numpy.sin(theta)
    return 1 - 4 * s ** 2
def pohlF(eta):
    """Base Pohlhausen velocity-profile shape f(eta) = 2*eta - 2*eta**3 + eta**4,
    where eta = y/delta is the normalized wall distance in [0, 1]."""
    return 2 * eta - 2 * eta ** 3 + eta ** 4


def pohlG(eta):
    """Pressure-gradient correction shape g(eta) = eta*(1 - eta)**3 / 6 of the
    Pohlhausen boundary-layer profile."""
    return eta / 6 * (1 - eta) ** 3


def pohl(eta, lam):
    """Full Pohlhausen profile u/u_e = f(eta) + lambda*g(eta) for shape
    parameter lam."""
    return pohlF(eta) + lam * pohlG(eta)
def heun(g, psi_i, i, dx, *args):
    """Advance psi by one step of size dx using Heun's predictor-corrector
    (second-order Runge-Kutta) method.

    g(psi, i, *args) must return the derivative d(psi)/dx at grid index i;
    any extra *args are forwarded to g unchanged.
    """
    slope_here = g(psi_i, i, *args)              # derivative at the current point
    psi_predict = psi_i + slope_here * dx        # Euler predictor for point i+1
    slope_next = g(psi_predict, i + 1, *args)    # derivative at the predicted point
    # Trapezoidal corrector: average the two slopes.
    return psi_i + 0.5 * (slope_here + slope_next) * dx
def C_F_calc(tau, sx, N, i_sep=None):
    """Integrate the wall shear stress over the attached surface (trapezoidal rule).

    tau:   wall shear stress at each surface point
    sx:    x-component of the surface tangent on the attached portion
           (length i_sep + 1, matching tau[:i_sep + 1])
    N:     number of surface points, so the grid spacing is pi / (N - 1)
    i_sep: index of the separation point. Defaults to the module-level
           ``iSep`` left behind by the marching loop, preserving the original
           global-variable behavior while letting new callers pass it in.
    """
    if i_sep is None:
        i_sep = iSep  # backward-compatible fallback to the global set by march()
    # numpy.trapz was renamed numpy.trapezoid in NumPy 2.0; support both.
    trap = getattr(numpy, 'trapezoid', None)
    if trap is None:
        trap = numpy.trapz
    # Integrate only over the attached region; flow past i_sep is separated.
    return trap(tau[:i_sep + 1] * sx, dx=numpy.pi / (N - 1))
def coefficient_Cd(N, sy, C_P_s):
    """Pressure-drag coefficient: trapezoidal integral of the surface pressure
    coefficient weighted by the y-component of the surface tangent.

    N:     number of surface points (grid spacing pi / (N - 1))
    sy:    y-component of the surface tangent at each point
    C_P_s: pressure coefficient along the surface (held constant past separation)
    """
    # numpy.trapz was renamed numpy.trapezoid in NumPy 2.0; support both.
    trap = getattr(numpy, 'trapezoid', None)
    if trap is None:
        trap = numpy.trapz
    return trap(C_P_s * sy, dx=numpy.pi / (N - 1))
def lift(panels):
    """Lift coefficient C_L of the foil from the strengths of its vortex panels.

    panels: array of Panel objects; each panel p exposes p.gamma (vortex
            strength), p.S (panel half-width) and p.x (endpoint x-coordinates).
    """
    # Chord length: x-distance between panel 0 and the diametrically opposite
    # (mid-index) panel. Use floor division: len(panels)/2 is a float under
    # Python 3 and cannot be used as an index.
    c = panels[0].x[0] - panels[len(panels) // 2].x[0]
    # C_L = -(4/c) * sum(gamma_i * S_i), the normalization used by this notebook.
    return -4. / c * numpy.sum([p.gamma * p.S for p in panels])
def camber(t_c, alpha, dy, N=128):
    """Mean-line height y_bar at x = 0 for a Jukowski foil built with vertical offset dy.

    t_c:   thickness-to-chord ratio; dx = t_c - 0.019 is the shift from t/c to
           the make_jukowski dx argument (per the notebook's note)
    alpha: unused here; kept so the signature matches the other helpers
    dy:    vertical shift of the generator circle (controls camber)
    N:     number of panels
    """
    dx = t_c - 0.019
    foil = make_jukowski(N, dx, dy)
    # plot_flow(foil, alpha)  # Can be commented in to inspect the shape of the foil
    # Quarter-chord indices; floor division keeps them ints under Python 3.
    a = N // 4
    b = (3 * N) // 4
    # The mean line at x = 0 is midway between the upper and lower surface points.
    return 0.5 * (foil[a].yc + foil[b].yc)
def jukowski_CL(alpha, t_c=0.15+0.019):
    """Analytic lift coefficient of a symmetric Jukowski foil.

    alpha: angle of attack in radians
    t_c:   thickness-to-chord ratio (default matches the notebook's 15% foil)

    C_L = 2*pi*(1 + 4/(3*sqrt(3)) * t_c) * sin(alpha)
    """
    # Use float literals: under Python 2 (this notebook still uses py2 print
    # statements) 4/3 is integer division and evaluates to 1, silently
    # corrupting the thickness correction. This also matches the sibling
    # helper CLA, which already writes 4./3.
    return 2. * numpy.pi * (1 + 4. / 3. / numpy.sqrt(3) * t_c) * numpy.sin(alpha)
c_l = [] for i in ybar_c: c_l.append(calc_CL_camb(0, 128, 0.15, i)) sep_points_camber = [] for i in ybar_c: sep_points_camber.append(predict_jukowski_separation_camber(0.15, i, 0, 128)) sep_points_camber = numpy.array(sep_points_camber) """ Explanation: we can see the realtionship between dy and y bar is linear correlation. End of explanation """
tpin3694/tpin3694.github.io
machine-learning/stemming_words.ipynb
mit
# Load library from nltk.stem.porter import PorterStemmer """ Explanation: Title: Stemming Words Slug: stemming_words Summary: How to stem words in unstructured text data for machine learning in Python. Date: 2016-09-09 12:00 Category: Machine Learning Tags: Preprocessing Text Authors: Chris Albon <a alt="Stemming Words" href="https://machinelearningflashcards.com"> <img src="stemming_words/Stemming_Words_print.png" class="flashcard center-block"> </a> Preliminaries End of explanation """ # Create word tokens tokenized_words = ['i', 'am', 'humbled', 'by', 'this', 'traditional', 'meeting'] """ Explanation: Create Text Data End of explanation """ # Create stemmer porter = PorterStemmer() # Apply stemmer [porter.stem(word) for word in tokenized_words] """ Explanation: Stem Words Stemming reduces a word to its stem by identifying and removing affixes (e.g. gerunds) while keeping the root meaning of the word. NLTK's PorterStemmer implements the widely used Porter stemming algorithm. End of explanation """
PyLCARS/PythonUberHDL
myHDL_DigLogicFundamentals/myHDL_Latches.ipynb
bsd-3-clause
def SRLatch(S_in, rst, Q_out, Qn_out):
    """Behavioral myHDL model of an SR latch.

    S_in:   set input signal
    rst:    reset input signal
    Q_out:  latched output signal
    Qn_out: complementary output signal

    S=1, R=0 sets Q; S=0, R=1 resets Q; S=1, R=1 is the invalid input
    combination and drives both outputs low. S=0, R=0 makes no assignment
    at all, so the outputs hold their previous values -- that deliberately
    incomplete assignment is what makes the combinational block a latch.
    """
    @always_comb
    def logic():
        if S_in and rst==0:
            Q_out.next=1
            Qn_out.next=0
        elif S_in==0 and rst:
            Q_out.next=0
            Qn_out.next=1
        elif S_in and rst:
            Q_out.next=0
            Qn_out.next=0
    return logic
But the diffrance between the two is that Flip-Flops are clock controlled devices built upon Latches where as Latches are not clock dependent SR-Latch Symbol and Internals The Symbol for a SR-Latch and one representation of it's internals is shown below <img style="float: center;" src="SRLatchSymbolInternal.jpg"> Definition State Diagram myHDL SR-Latch Gate and Testing Need Help Getting this Latch via Combo Cirucits working geting AlwayCombError in using out signal as argument in out signals next state out myHDL SR-Latch Behavioral and Testing End of explanation """ toVerilog(SRLatch, S_in, rst, Q_out, Qn_out) #toVHDL(SRLatch, S_in, rst, Q_out, Qn_out) _=VerilogTextReader('SRLatch') """ Explanation: myHDL SR-Latch Behavioral HDL Synthesis End of explanation """ def GSRLatch(S_in, rst, ena, Q_out, Qn_out): @always_comb def logic(): if ena: if S_in and rst==0: Q_out.next=1 Qn_out.next=0 elif S_in==0 and rst: Q_out.next=0 Qn_out.next=1 elif S_in and rst: Q_out.next=0 Qn_out.next=0 else: pass return logic S_in, rst, ena, Q_out, Qn_out=[Signal(bool(0)) for _ in range(5)] Peeker.clear() Peeker(S_in, 'S_in'); Peeker(rst, 'rst'); Peeker(ena, 'ena') Peeker(Q_out, 'Q_out'); Peeker(Qn_out, 'Qn_out') DUT=GSRLatch(S_in=S_in, rst=rst, ena=ena, Q_out=Q_out, Qn_out=Qn_out) inputs=[S_in, rst, ena] sim=Simulation(DUT, Combo_TB(inputs), *Peeker.instances()).run() Peeker.to_wavedrom(start_time=0, stop_time=2*2**len(inputs), tock=True, title='GSRLatch Behavioral simulation', caption=f'after clock cycle {2**len(inputs)-1} ->random input') MakeDFfromPeeker(Peeker.to_wavejson(start_time=0, stop_time=2**len(inputs) -1)) """ Explanation: The following shows the Xilinx's Vivado 2016.1 RTL generated schematic of our Behaviorla SRLatch from the synthesised verilog code. We can see that the systhizied version is quite apstract from fig lakdfjkaj. 
<img style="float: center;" src="SRLatchBehaviroalRTLSch.PNG"> Gated SR-Latch myHDL SR-Latch Behavioral and Testing End of explanation """ toVerilog(GSRLatch, S_in, rst, ena, Q_out, Qn_out) #toVHDL(GSRLatch, S_in, rst,ena, Q_out, Qn_out) _=VerilogTextReader('GSRLatch') """ Explanation: myHDL SR-Latch Behavioral HDL Synthesis End of explanation """ def DLatch(D_in, ena, Q_out, Qn_out): #Normal Qn_out is not specifed since a not gate is so easily implimented @always_comb def logic(): if ena: Q_out.next=D_in Qn_out.next=not D_in return logic D_in, ena, Q_out, Qn_out=[Signal(bool(0)) for _ in range(4)] Peeker.clear() Peeker(D_in, 'D_in'); Peeker(ena, 'ena') Peeker(Q_out, 'Q_out'); Peeker(Qn_out, 'Qn_out') DUT=DLatch(D_in=D_in, ena=ena, Q_out=Q_out, Qn_out=Qn_out) inputs=[D_in, ena] sim=Simulation(DUT, Combo_TB(inputs), *Peeker.instances()).run() Peeker.to_wavedrom(start_time=0, stop_time=2*2**len(inputs), tock=True, title='DLatch Behavioral simulation', caption=f'after clock cycle {2**len(inputs)-1} ->random input') MakeDFfromPeeker(Peeker.to_wavejson(start_time=0, stop_time=2**len(inputs) -1)) """ Explanation: The following shows the Xilinx's Vivado 2016.1 RTL generated schematic of our Behaviorla Gated SRLatch from the synthesised verilog code. We can see that the systhizied version is quite apstract from fig lakdfjkaj. <img style="float: center;" src="GSRLatchBehaviroalRTLSch.PNG"> D-Latch myHDL Behavioral D-Latch and Testing End of explanation """ toVerilog(DLatch, D_in, ena, Q_out, Qn_out) #toVHDL(DLatch,D_in, ena, Q_out, Qn_out) _=VerilogTextReader('DLatch') """ Explanation: myHDL DLatch Behavioral HDL Synthesis End of explanation """
jdhp-docs/python-notebooks
python_numpy_fourier_transform_en.ipynb
mit
import numpy as np import matplotlib.pyplot as plt from matplotlib import cm """ Explanation: Fast Fourier Transform snippets Documentation Numpy implementation: http://docs.scipy.org/doc/numpy/reference/routines.fft.html Scipy implementation: http://docs.scipy.org/doc/scipy/reference/fftpack.html Import directives End of explanation """ pattern = np.zeros((4, 4)) pattern[1:3,1:3] = 1 pattern signal = np.tile(pattern, (2, 2)) fig = plt.figure(figsize=(16.0, 10.0)) ax = fig.add_subplot(111) ax.imshow(signal, interpolation='nearest', cmap=cm.gray) """ Explanation: Make data End of explanation """ transformed_signal = np.fft.fft2(signal) #transformed_signal fig = plt.figure(figsize=(16.0, 10.0)) ax = fig.add_subplot(111) ax.imshow(abs(transformed_signal), interpolation='nearest', cmap=cm.gray) """ Explanation: Fourier transform with Numpy Do the fourier transform End of explanation """ max_value = np.max(abs(transformed_signal)) filtered_transformed_signal = transformed_signal * (abs(transformed_signal) > max_value*0.5) #filtered_transformed_signal[6, 6] = 0 #filtered_transformed_signal[2, 2] = 0 #filtered_transformed_signal[2, 6] = 0 #filtered_transformed_signal[6, 2] = 0 #filtered_transformed_signal[1, 6] = 0 #filtered_transformed_signal[6, 1] = 0 #filtered_transformed_signal[1, 2] = 0 #filtered_transformed_signal[2, 1] = 0 #filtered_transformed_signal fig = plt.figure(figsize=(16.0, 10.0)) ax = fig.add_subplot(111) ax.imshow(abs(filtered_transformed_signal), interpolation='nearest', cmap=cm.gray) """ Explanation: Filter End of explanation """ filtered_signal = np.fft.ifft2(filtered_transformed_signal) #filtered_signal fig = plt.figure(figsize=(16.0, 10.0)) ax = fig.add_subplot(111) ax.imshow(abs(filtered_signal), interpolation='nearest', cmap=cm.gray) #shifted_filtered_signal = np.fft.ifftshift(transformed_signal) #shifted_filtered_signal #shifted_transformed_signal = np.fft.fftshift(transformed_signal) #shifted_transformed_signal """ Explanation: Do the reverse 
transform End of explanation """
blue-yonder/tsfresh
notebooks/examples/01 Feature Extraction and Selection.ipynb
mit
%matplotlib inline import matplotlib.pylab as plt from tsfresh import extract_features, extract_relevant_features, select_features from tsfresh.utilities.dataframe_functions import impute from tsfresh.feature_extraction import ComprehensiveFCParameters from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report """ Explanation: Feature Extraction and Selection This basic example shows how to use tsfresh to extract useful features from multiple timeseries and use them to improve classification performance. We use the robot execution failure data set as an example. End of explanation """ from tsfresh.examples import robot_execution_failures robot_execution_failures.download_robot_execution_failures() df, y = robot_execution_failures.load_robot_execution_failures() df.head() """ Explanation: Load and visualize data The data set documents 88 robot executions (each has a unique id between 1 and 88), which is a subset of the Robot Execution Failures Data Set. For the purpose of simplicity we are only differentiating between successfull and failed executions (y). For each execution 15 force (F) and torque (T) samples are given, which were measured at regular time intervals for the spatial dimensions x, y, and z. Therefore each row of the data frame references a specific execution (id), a time index (index) and documents the respective measurements of 6 sensors (F_x, F_y, F_z, T_x, T_y, T_z). End of explanation """ df[df.id == 3][['time', 'F_x', 'F_y', 'F_z', 'T_x', 'T_y', 'T_z']].plot(x='time', title='Success example (id 3)', figsize=(12, 6)); df[df.id == 20][['time', 'F_x', 'F_y', 'F_z', 'T_x', 'T_y', 'T_z']].plot(x='time', title='Failure example (id 20)', figsize=(12, 6)); """ Explanation: Let's draw some example executions: End of explanation """ # We are very explicit here and specify the `default_fc_parameters`. 
If you remove this argument, # the ComprehensiveFCParameters (= all feature calculators) will also be used as default. # Have a look into the documentation (https://tsfresh.readthedocs.io/en/latest/text/feature_extraction_settings.html) # or one of the other notebooks to learn more about this. extraction_settings = ComprehensiveFCParameters() X = extract_features(df, column_id='id', column_sort='time', default_fc_parameters=extraction_settings, # we impute = remove all NaN features automatically impute_function=impute) """ Explanation: Extract Features We can use the data to extract time series features using tsfresh. We want to extract features for each time series, that means for each robot execution (which is our id) and for each of the measured sensor values (F_* and T_*). You can think of it like this: tsfresh will result in a single row for each id and will calculate the features for each columns (we call them "kind") separately. The time column is our sorting column. For an overview on the data formats of tsfresh, please have a look at the documentation. End of explanation """ X.head() """ Explanation: X now contains for each robot execution (= id) a single row, with all the features tsfresh calculated based on the measured times series values for this id. End of explanation """ X_filtered = select_features(X, y) X_filtered.head() """ Explanation: <div class="alert alert-info"> Currently, 4674 non-NaN features are calculated. This number varies with the version of `tsfresh` and with your data. </div> Select Features Using the hypothesis tests implemented in tsfresh (see here for more information) it is now possible to select only the relevant features out of this large dataset. tsfresh will do a hypothesis test for each of the features to check, if it is relevant for your given target. 
End of explanation """ X_full_train, X_full_test, y_train, y_test = train_test_split(X, y, test_size=.4) X_filtered_train, X_filtered_test = X_full_train[X_filtered.columns], X_full_test[X_filtered.columns] classifier_full = DecisionTreeClassifier() classifier_full.fit(X_full_train, y_train) print(classification_report(y_test, classifier_full.predict(X_full_test))) classifier_filtered = DecisionTreeClassifier() classifier_filtered.fit(X_filtered_train, y_train) print(classification_report(y_test, classifier_filtered.predict(X_filtered_test))) """ Explanation: <div class="alert alert-info"> Currently, 669 non-NaN features survive the feature selection given this target. Again, this number will vary depending on your data, your target and the `tsfresh` version. </div> Train and evaluate classifier Let's train a boosted decision tree on the filtered as well as the full set of extracted features. End of explanation """ X_filtered_2 = extract_relevant_features(df, y, column_id='id', column_sort='time', default_fc_parameters=extraction_settings) (X_filtered.columns == X_filtered_2.columns).all() """ Explanation: Compared to using all features (classifier_full), using only the relevant features (classifier_filtered) achieves better classification performance with less data. <div class="alert alert-info"> Please remember that the hypothesis test in `tsfresh` is a statistical test. You might get better performance with other feature selection methods (e.g. training a classifier with all but one feature to find its importance) - but in general the feature selection implemented in `tsfresh` will give you a very reasonable set of selected features. </div> Extraction and Filtering is the same as filtered Extraction Above, we performed the feature extraction and selection independently. If you are only interested in the list of selected features, you can run this in one step: End of explanation """
metpy/MetPy
v0.12/_downloads/9041777e133eed610f5b243c688e89f9/surface_declarative.ipynb
bsd-3-clause
from datetime import datetime, timedelta

import cartopy.crs as ccrs
import pandas as pd

from metpy.cbook import get_test_data
import metpy.plots as mpplots

# Read the pre-processed surface observations; the 'valid' column holds the
# observation timestamps and is parsed to datetimes on load.
data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),
                   infer_datetime_format=True, parse_dates=['valid'])

# Plotting the Observations using a 15 minute time window for surface observations
obs = mpplots.PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 12, 13)     # map time (presumably UTC -- confirm)
obs.time_window = timedelta(minutes=15)  # accept obs within 15 min of obs.time
obs.level = None                         # surface data: no vertical level
# One entry per station-model slot below: temperature, dewpoint, pressure,
# sky cover and present-weather symbol, placed/colored positionally.
obs.fields = ['tmpf', 'dwpf', 'emsl', 'cloud_cover', 'wxsym']
obs.locations = ['NW', 'SW', 'NE', 'C', 'W']
obs.colors = ['red', 'green', 'black', 'black', 'blue']
# Pressure is rendered in the coded 3-digit form: multiply by 10 and keep the
# last three characters (tens/units/tenths of hPa).
obs.formats = [None, None, lambda v: format(10 * v, '.0f')[-3:], 'sky_cover',
               'current_weather']
obs.vector_field = ('uwind', 'vwind')    # wind barbs from u/v components
obs.reduce_points = 1                    # thin overlapping stations

# Add map features for the particular panel
panel = mpplots.MapPanel()
panel.layout = (1, 1, 1)
panel.area = 'ga'                        # Georgia, per the narrative above
panel.projection = ccrs.PlateCarree()
panel.layers = ['coastline', 'borders', 'states']
panel.plots = [obs]

# Collecting panels for complete figure
pc = mpplots.PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]

# Showing the results
pc.show()
Hyperparticle/graph-nlu
notebooks/dynamic_memory_1.ipynb
mit
# Import the necessary packages import pandas as pd import numpy as np import nltk from sklearn.metrics import accuracy_score # Download NLTK packages # An OS window should pop up for you to download the appropriate packages # Select all-nltk and click on the download button. Once download has finished exit the window and continue. nltk.download() # Read the bAbI data as CSV filename = 'resources/qa1_single-supporting-fact_train.txt' data_qa1 = pd.read_csv(filename, delimiter='\t', names=['sentence', 'answer', 'factid']) data_qa1 = data_qa1.fillna('') """ Explanation: Memory Representation in Dialogue Systems The following notebook is the result of an NLP project that explores the question, "How could interaction be stored in memory, and how can that information be leveraged for further use?" Dialog systems can be quite useful, but have difficulty keeping track of concepts and entities dynamically. Commercial implementations among the likes of Siri, Google Assistant, and Alexa are great for performing simple tasks, but fall short when remembering ad-hoc relationships that regularly present themselves in conversation. For more information on dialogue systems, graph databases, and ontologies as they relate to this project, see the white paper entitled IPA_Memory under the docs directory of this repository. To enhance the capabilities of dialogue systems, this notebook will provide a simple software implementation of a model that is intended to by dynamic, incremental, flexible, and interpretable. By forming high-level concepts that evolve over time, this model will evaluate the dialogue system's ability to understand user input. This notebook will show how such a system can update its internal state based on natural language facts, and retrieve results based on natural language questions. See the white paper for more details on the rationale behind these design decisions. 
# Tag each sentence as a statement (S) or question (Q)
# A row is a question exactly when it carries a non-empty answer.
tag_sentence = lambda row: 'S' if row.answer == '' else 'Q'
data_qa1['type'] = data_qa1.apply(tag_sentence, axis=1)

# Use NLTK to tokenize the sentences into arrays of words
# If you get an error here, make sure you have downloaded the NLTK packages above
# The [1:] discards the first token -- presumably the line number bAbI
# prepends to every sentence; confirm against the raw data file.
tokenize = lambda row: nltk.word_tokenize(row.sentence)[1:]
data_qa1.sentence = data_qa1.apply(tokenize, axis=1)

# Drop the factid column, as we won't need it
data_qa1 = data_qa1.drop('factid', axis=1)
data_qa1[:6]
def statements(df):
    """Return only the statement ('S') rows, without the answer/type columns."""
    mask = df.type == 'S'
    subset = df[mask].reset_index(drop=True)
    return subset.drop('answer', axis=1).drop('type', axis=1)


def questions(df):
    """Return only the question ('Q') rows, without the type column."""
    mask = df.type == 'Q'
    subset = df[mask].reset_index(drop=True)
    return subset.drop('type', axis=1)
def extract_statement(tags):
    """Build a (subject, relation, object) triple from a POS-tagged statement.

    Proper nouns (NNP) become the subject, a past-tense verb (VBD) -- or the
    literal word 'journeyed', which the tagger mislabels -- becomes the
    relation, and the last NNP/NN token seen becomes the object.
    """
    triple = ['', '', '']
    for token, pos in tags:
        if pos == 'NNP':
            triple[0] = token
        elif pos == 'VBD' or token == 'journeyed':
            triple[1] = token
        # Deliberately a separate check: an NNP token also counts as a
        # candidate object until a later noun overwrites it.
        if pos in ('NNP', 'NN'):
            triple[2] = token
    return tuple(triple)
def extract_question(tags):
    """Return the entity under discussion: the last noun in the question."""
    entity = ''
    for token, pos in tags:
        if pos in ('NNP', 'NN'):
            entity = token  # keep overwriting so the final noun wins
    return entity


def extract(row):
    """Dispatch a processed DataFrame row to the matching extraction routine."""
    return extract_statement(row.tag) if row.type == 'S' else extract_question(row.tag)


def person_statements(person):
    """All statements whose extracted subject matches *person*."""
    facts = statements(data_qa1)
    subject_matches = facts.extracted.map(lambda triple: triple[0] == person)
    return facts[subject_matches]


def person_statements_recent(person, n=5):
    """The n most recent statements about *person*, newest first."""
    recent = person_statements(person)[-n:]
    return recent.iloc[::-1]
# Make sure all important data is backed up before continuing def reset_db(): '''Remove all nodes and relationships from the database''' session = driver.session() session.run('MATCH (n) DETACH DELETE n') def create(query, n=0): '''Given a query, create a graph based on each triple in the extracted statements''' session = driver.session() stat = statements(data_qa1) n = len(stat) if n <= 0 else n # Run the first n statements if specified for subject,relation,obj in stat[:n].extracted: session.run(query, subject=subject, relation=relation, obj=obj) """ Explanation: Build the Graph Once we have processed the data into triples, we can build graphs from them. Below we have defined a couple functions to reset the database and run queries. We will use Neo4j's Python driver to accomplish this. Note that if the URL or auth credentials of your Neo4j server are different, you will need to change them below. End of explanation """ reset_db() # This will clear the database! # Create a direct relationship between subject and object v1_query = ''' MERGE (s:SUBJECT {name: $subject}) MERGE (o:OBJECT {name: $obj}) MERGE (s)-[r:RELATION {name: $relation}]->(o) ''' create(v1_query) """ Explanation: V1: Direct relationships One of the first impulses when building the graph may be to represent the subject and object as nodes, and the relations as edges between them. End of explanation """ reset_db() # This will clear the database! # Represent each relation as a node v2_query = ''' MERGE (s:SUBJECT {name: $subject}) MERGE (o:OBJECT {name: $obj}) CREATE (s)-[:R0]->(r:RELATION {name: $relation})-[:R1]->(o) ''' create(v2_query) """ Explanation: Run the query below and see what the graph looks like. Pop open a new tab in the Neo4j browser (default http://localhost:7474/browser/) and run the query: MATCH (n) RETURN n LIMIT 50 The graph is a reasonable first start, as the relations point each person to where they have been. 
reset_db() # This will clear the database!

# Represent each relation as a node
# Unlike v1, the relation uses CREATE (not MERGE), so every statement adds a
# fresh RELATION node even when the same triple repeats -- repeated room
# visits stay visible in the graph.
v2_query = '''
MERGE (s:SUBJECT {name: $subject})
MERGE (o:OBJECT {name: $obj})
CREATE (s)-[:R0]->(r:RELATION {name: $relation})-[:R1]->(o)
'''
create(v2_query)
reset_db()

# Represent each relation as a node, ordered by a linked list (per subject)
# Each new RELATION node becomes the HEAD of its subject's event list; the
# previous head (if any) gets a NEXT edge to it, building a chronological
# chain of that subject's movements.
v3_query = '''
MERGE (s:SUBJECT {name: $subject})
MERGE (o:OBJECT {name: $obj})
WITH s,o

// Create an new relation between the subject and object
CREATE (s)-[:R0]->(r:RELATION {name: $relation})-[:R1]->(o)
CREATE (s)-[h:HEAD]->(r) // Make the newly created relation the head of the list
WITH s,r,o,h

// Find the previous head of the list (if none exist, this query will terminate here)
MATCH (s)-[h_prev:HEAD]->(r_prev:RELATION)
WHERE h_prev <> h

// Complete the link, remove the previous head pointer
CREATE (r_prev)-[:NEXT]->(r)
DELETE h_prev
'''
session = driver.session()

# Create an index for faster access
# NOTE(review): 'CREATE INDEX ON :Label(prop)' is legacy Neo4j 3.x syntax --
# confirm the target server version before reuse.
session.run('CREATE INDEX ON :SUBJECT(name)')
session.run('CREATE INDEX ON :RELATION(name)')
session.run('CREATE INDEX ON :OBJECT(name)')

create(v3_query)

def find_person(person):
    '''Find the room a person is currently in'''
    # HEAD marks the most recent relation, so following it to its OBJECT
    # yields the current room. Returns the raw driver result; callers
    # extract the record themselves (e.g. via .single()).
    query = '''
    MATCH (s:SUBJECT {name:$name})-[:HEAD]->(r:RELATION)-->(o:OBJECT)
    RETURN s AS subject, r AS relation, o AS obj
    '''
    return session.run(query, name=person)

# Note: If this is run less than a second after creating the knowledge graph,
# the Python driver may cause a race condition where the graph
# isn't finished updating, which could give you the wrong answer.
session = driver.session()
record = find_person('Mary').single()
print(record['obj'].get('name'))
def find_person_history(person, n=100):
    """Return the rooms *person* has visited, most recent first.

    The result is a list of (relation, room) pairs of at most *n* entries,
    obtained by walking the per-person linked list of RELATION nodes
    backwards from its HEAD.
    """
    # An upper bound below 1 means "no limit": leave the Cypher range open.
    hop_limit = '' if n < 1 else str(n)
    cypher = '''
    MATCH (s:SUBJECT {name:$name})-[:HEAD]->(r:RELATION)-->(o:OBJECT)
    MATCH (s)-->(r_prev:RELATION)-[k*1..%s]->(r), (r_prev)-->(o_prev:OBJECT)
    WITH size(k) AS dist, r, o, r_prev, o_prev
    ORDER BY size(k)
    WITH r, o, r_prev, o_prev
    RETURN [r.name] + collect(r_prev.name) AS relation, [o.name] + collect(o_prev.name) AS obj
    ''' % hop_limit
    record = driver.session().run(cypher, name=person).single()
    pairs = zip(record['relation'], record['obj'])
    # The traversal yields one extra trailing pair (presumably the list
    # terminator); drop it to match the expected history length.
    return list(pairs)[:-1]


def find_room_visitors(room):
    """Return how many RELATION events point at the given room, i.e. the
    total number of recorded visits (the name is historical)."""
    cypher = '''
    MATCH (r:RELATION)-->(o:OBJECT {name:$name})
    RETURN count(r) AS count
    '''
    result = driver.session().run(cypher, name=room)
    return result.single()['count']
find_room_visitors('office')

def get_answers(row):
    '''Given an input row merge the statement in the graph, or query the graph if it is a question'''
    if row.type == 'S':
        # Statements update the knowledge graph via the v3 linked-list query
        # and contribute nothing to the answer list.
        subject,relation,obj = row.extracted
        session.run(v3_query, subject=subject, relation=relation, obj=obj)
        return ''
    elif row.type == 'Q':
        person = row.extracted
        # WARNING: do not consume the result (e.g., call .consume() or .single())
        # until the entire iteration is done.
        # Failure to do so may cause the queries to be VERY slow!
        return find_person(person)

# Start from an empty graph and stream every sentence through get_answers:
# statements are merged in, questions return lazy driver results.
reset_db()
session = driver.session()
results = data_qa1.apply(get_answers, axis=1)
results = [x for x in results if x != '']
# Only now consume each result to read the predicted room name.
predicted = [result.single()['obj'].get('name') for result in results]

predicted[:5]

# Ground truth: the answer column of the question rows, in order.
actual = list(data_qa1[data_qa1.type == 'Q'].answer)
actual[:5]

accuracy_score(actual, predicted)
DataPilot/notebook-miner
Notebook-miner test.ipynb
apache-2.0
from base_loader import base_loader

# Load one example notebook and inspect its kernel metadata and code.
# (base_loader is project-local; its accessors below are assumed to return
# values matching their names -- confirm against base_loader.py.)
notebook_loaded = base_loader('example_notebooks/ML-Exercise2.ipynb')
notebook_loaded.get_kernelspec()
print (notebook_loaded.get_all_python())

# Gather simple per-notebook cell counts into one printable summary.
summary_string = ''
summary_string += "Number of cells in notebook is: " + str(notebook_loaded.get_number_cells()) + "\n"
summary_string += "Number of python cells in notebook is: " + str(notebook_loaded.get_number_python_cells()) + "\n"
summary_string += "Number of markdown cells in notebook is: " + str(notebook_loaded.get_number_markdown_cells()) + "\n"
print (summary_string)
# Walk the cells in the order they were executed and print each one's output.
for i, cell_ind in enumerate(notebook_loaded.get_execution_order()):
    print ("Executed cell number ",i)
    #print (notebook_loaded.get_cell_by_execution_order(i))
    print ("Output of this cell ")
    print (notebook_loaded.get_cell_outputs(cell_ind))

# Summary-statistics add-on for the loaded notebook.
from summary_statistics import summary_statistics
ss = summary_statistics(notebook_loaded)
print (ss.full_statistics())

# Timing comparison of the three cell-access methods (IPython %%time magic):
# the nbconvert-based path vs. the raw accessors.
%%time
for i in range(100):
    notebook_loaded.get_cell_and_convert(0)

%%time
for i in range(100):
    notebook_loaded.get_raw_cell_converted(0)

%%time
for i in range(100):
    notebook_loaded.get_raw_cell(0)

# Load every .ipynb under example_notebooks/ and print its statistics.
import os
notebook_array = []
for nb_file in os.listdir("example_notebooks"):
    if nb_file.endswith(".ipynb"):
        nb_file = os.path.join("example_notebooks" , nb_file)
        notebook_array.append(base_loader(nb_file))

for i, notebook_loaded in enumerate(notebook_array):
    print ("Notebook",i+1,"/",len(notebook_array))
    print ("Name:",notebook_loaded.get_filename())
    print ( summary_statistics(notebook_loaded).full_statistics() )
ueapy/ueapy.github.io
content/notebooks/2017-10-30-pythonic-code.ipynb
mit
class MyClass:
    """Container whose truthiness mirrors that of its wrapped data."""

    def __init__(self, data):
        # Any sized object works here; truthiness is derived from its length.
        self.data = data

    def __bool__(self):
        # Non-empty data -> truthy, empty data -> falsy.
        return len(self.data) > 0
End of explanation """ def test_if_greater_than_ten(x): return True if x>10 else False test_if_greater_than_ten(11) def fun(): print('blah') x = fun x() type(x) callable(x) isinstance(12345, (int, float)) """ Explanation: Don't be this guy: <img src="https://imgur.com/24DSLiA.png"> End of explanation """ my_axis = 'x' if my_axis in ('x', 'y'): # Instead of writing like that: # if my_axis == 'x' or my_axis == 'y' print('horizontal') elif my_axis == 'z': print('vertical') """ Explanation: Testing for None python if something is None: print('no results') else: print('here are some results') negation: python if something is not None: print('Option A') else: print('Option B') Multiple tests against a single variable End of explanation """ a = [1,2,3] """ Explanation: Sometimes can be a bit slower if used inside a long loop. Checking for type End of explanation """ import numpy as np a = np.arange(10) isinstance(a, np.ndarray) """ Explanation: How to check if the variable is a list or dictionary? End of explanation """ the_variable = [1, 2, 3, 4] another_variable = "This is my string. There are many like it, but this one is mine." and_another_variable = 1000000000000 for i in another_variable[:10]: # iterate over the first 10 elements and print them print(i) import collections if isinstance(1234563645, collections.Iterable): # iterable print('It is iterable') else: # not iterable print('It is NOT iterable') """ Explanation: Checking if an object is iterable? 
import numpy as np

# numpy arrays are ordinary objects, so isinstance works on them too.
a = np.arange(10)
isinstance(a, np.ndarray)

the_variable = [1, 2, 3, 4]
another_variable = "This is my string. There are many like it, but this one is mine."
and_another_variable = 1000000000000

for i in another_variable[:10]:  # iterate over the first 10 elements and print them
    print(i)

import collections.abc

# Fix: the container ABCs moved to collections.abc in Python 3.3 and the
# collections.Iterable alias was removed in Python 3.10, so the original
# `collections.Iterable` raises AttributeError on modern interpreters.
if isinstance(1234563645, collections.abc.Iterable):
    # iterable
    print('It is iterable')
else:
    # not iterable
    print('It is NOT iterable')

# Duck-typing style check: an object is (almost always) iterable if it
# exposes __iter__.
hasattr(the_variable, '__iter__')

day = 30
month = 'October'

# Works, but not pythonic: manual concatenation with explicit str() calls.
print('Today is ' + month + ', ' + str(day))

# Pythonic: str.format with auto-numbered, indexed and named fields.
print('Today is {}, {}'.format(month, day))
print('Today is {1}, {0}'.format(month, day))
print('Today is {1}, {0}. Tomorrow will be still {0}'.format(month, day))
print('Today is {m}, {d}. Tomorrow will be still {m}. And again: {d}'.format(m=month, d=day))

# Unpacking a dict supplies the named fields directly.
data = {'dow': 'Monday', 'location': 'UEA', 'who': 'Python Group'}
print('Today is {dow}. We are at {location}.'.format(**data))

# f-strings (Python 3.6+) pull names straight from the enclosing namespace.
print(f'Today is {day}th. The month is {month}')
# print(f'Today is {data["dow"]}. We are at {data["location"]}')
thempel/adaptivemd
examples/tutorial/1_example_setup_project.ipynb
lgpl-2.1
import sys, os """ Explanation: First we cover some basics about adaptive sampling to get you going. We will briefly talk about resources files generators how to run a simple trajectory Imports End of explanation """ from adaptivemd import Project """ Explanation: Alright, let's load the package and pick the Project since we want to start a project End of explanation """ # Use this to completely remove the example-worker project from the database. Project.delete('tutorial') project = Project('tutorial') """ Explanation: Let's open a project with a UNIQUE name. This will be the name used in the DB so make sure it is new and not too short. Opening a project will always create a non-existing project and reopen an exising one. You cannot chose between opening types as you would with a file. This is a precaution to not accidentally delete your project. End of explanation """ from adaptivemd import LocalResource """ Explanation: Now we have a handle for our project. First thing is to set it up to work on a resource. The Resource What is a resource? A Resource specifies a shared filesystem with one or more clusteres attached to it. This can be your local machine or just a regular cluster or even a group of cluster that can access the same FS (like Titan, Eos and Rhea do). Once you have chosen your place to store your results it is set for the project and can (at least should) not be altered since all file references are made to match this resource. Let us pick a local resource on your laptop or desktop machine for now. No cluster / HPC involved for now. End of explanation """ resource = LocalResource() """ Explanation: We now create the Resource object End of explanation """ resource.shared_path """ Explanation: Since this object defines the path where all files will be placed, let's get the path to the shared folder. The one that can be accessed from all workers. On your local machine this is trivially the case. 
End of explanation """ project.initialize(resource) """ Explanation: Okay, files will be placed in $HOME/adaptivemd/. You can change this using an option when creating the Resource python LocalCluster(shared_path='$HOME/my/adaptive/folder/') If you are interested in more information about Resource setup consult the documentation about Resource Last, we save our configured Resource and initialize our empty prohect with it. This is done once for a project and should not be altered. End of explanation """ from adaptivemd import File, Directory """ Explanation: Files End of explanation """ pdb_file = File('file://../files/alanine/alanine.pdb') """ Explanation: First we define a File object. Instead of just a string, these are used to represent files anywhere, on the cluster or your local application. There are some subclasses or extensions of File that have additional meta information like Trajectory or Frame. The underlying base object of a File is called a Location. We start with a first PDB file that is located on this machine at a relative path End of explanation """ pdb_file.name = 'initial_pdb' """ Explanation: File like any complex object in adaptivemd can have a .name attribute that makes them easier to find later. You can either set the .name property after creation, or use a little helper method .named() to get a one-liner. This function will set .name and return itself. For more information about the possibilities to specify filelocation consult the documentation for File End of explanation """ pdb_file.load() """ Explanation: The .load() at the end is important. It causes the File object to load the content of the file and if you save the File object, the actual file is stored with it. This way it can simply be rewritten on the cluster or anywhere else. End of explanation """ from adaptivemd.engine.openmm import OpenMMEngine """ Explanation: Generators TaskGenerators are instances whose purpose is to create tasks to be executed. 
This is similar to the way Kernels work. A TaskGenerator will generate Task objects for you which will be translated into a ComputeUnitDescription and executed. In simple terms: The task generator creates the bash scripts for you that run a simulation or run pyemma. A task generator will be initialized with all parameters needed to make it work and it will now what needs to be staged to be used. The engine End of explanation """ engine = OpenMMEngine( pdb_file=pdb_file, system_file=File('file://../files/alanine/system.xml').load(), integrator_file=File('file://../files/alanine/integrator.xml').load(), args='-r --report-interval 1 -p CPU' ).named('openmm') """ Explanation: A task generator that will create jobs to run simulations. Currently it uses a little python script that will excute OpenMM. It requires conda to be added to the PATH variable or at least openmm to be installed on the cluster. If you setup your resource correctly then this should all happen automatically. So let's do an example for an OpenMM engine. This is simply a small python script that makes OpenMM look like a executable. It run a simulation by providing an initial frame, OpenMM specific system.xml and integrator.xml files and some additional parameters like the platform name, how often to store simulation frames, etc. End of explanation """ engine.name """ Explanation: We have now an OpenMMEngine which uses the previously made pdb File object and uses the location defined in there. The same for the OpenMM XML files and some args to run using the CPU kernel, etc. Last we name the engine openmm to find it later. End of explanation """ engine.add_output_type('master', 'master.dcd', stride=10) engine.add_output_type('protein', 'protein.dcd', stride=1, selection='protein') """ Explanation: Next, we need to set the output types we want the engine to generate. We chose a stride of 10 for the master trajectory without selection and a second trajectory with only protein atoms and native stride. 
Note that the stride and all frame number ALWAYS refer to the native steps used in the engine. In out example the engine uses 2fs time steps. So master stores every 20fs and protein every 2fs End of explanation """ from adaptivemd.analysis.pyemma import PyEMMAAnalysis """ Explanation: The modeller End of explanation """ modeller = PyEMMAAnalysis( engine=engine, outtype='protein', features={'add_inverse_distances': {'select_Backbone': None}} ).named('pyemma') """ Explanation: The instance to compute an MSM model of existing trajectories that you pass it. It is initialized with a .pdb file that is used to create features between the $c_\alpha$ atoms. This implementaton requires a PDB but in general this is not necessay. It is specific to my PyEMMAAnalysis show case. End of explanation """ project.generators.add(engine) project.generators.add(modeller) """ Explanation: Again we name it pyemma for later reference. The other two option chose which output type from the engine we want to analyse. We chose the protein trajectories since these are faster to load and have better time resolution. The features dict expresses which features to use. In our case use all inverse distances between backbone c_alpha atoms. Add generators to project Next step is to add these to the project for later usage. We pick the .generators store and just add it. Consider a store to work like a set() in python. It contains objects only once and is not ordered. Therefore we need a name to find the objects later. Of course you can always iterate over all objects, but the order is not given. To be precise there is an order in the time of creation of the object, but it is only accurate to seconds and it really is the time it was created and not stored. End of explanation """ trajectory = project.new_trajectory(engine['pdb_file'], 100, engine) trajectory """ Explanation: Note, that you cannot add the same engine twice. 
But if you create a new engine it will be considered different and hence you can store it again. Create one initial trajectory Finally we are ready to run a first trajectory that we will store as a point of reference in the project. Also it is nice to see how it works in general. We are using a Worker approach. This means simply that someone (in our case the user from inside a script or a notebook) creates a list of tasks to be done and some other instance (the worker) will actually do the work. Create a Trajectory object First we create the parameters for the engine to run the simulation. Since it seemed appropriate we use a Trajectory object (a special File with initial frame and length) as the input. You could of course pass these things separately, but this way, we can actualy reference the no yet existing trajectory and do stuff with it. A Trajectory should have a unique name and so there is a project function to get you one. It uses numbers and makes sure that this number has not been used yet in the project. End of explanation """ print trajectory.length """ Explanation: This says, initial is alanine.pdb run for 100 frames and is named xxxxxxxx.dcd. Why do we need a trajectory object? You might wonder why a Trajectory object is necessary. You could just build a function that will take these parameters and run a simulation. At the end it will return the trajectory object. The same object we created just now. The main reason is to familiarize you with the general concept of asyncronous execution and so-called Promises. The trajectory object we built is similar to a Promise so what is that exactly? A Promise is a value (or an object) that represents the result of a function at some point in the future. In our case it represents a trajectory at some point in the future. Normal promises have specific functions do deal with the unknown result, for us this is a little different but the general concept stands. 
We create an object that represents the specifications of a Trajectory and so, regardless of the existence, we can use the trajectory as if it would exists: Get the length End of explanation """ print trajectory[20] """ Explanation: and since the length is fixed, we know how many frames there are and can access them End of explanation """ print trajectory.extend(100) """ Explanation: ask for a way to extend the trajectory End of explanation """ print trajectory.run() """ Explanation: ask for a way to run the trajectory End of explanation """ task = engine.run(trajectory) """ Explanation: We can ask to extend it, we can save it. We can reference specific frames in it before running a simulation. You could even build a whole set of related simulations this way without running a single frame. You might understand that this is pretty powerful especially in the context of running asynchronous simulations. Last, we did not answer why we have two separate steps: Create the trajectory first and then a task from it. The main reason is educational: It needs to be clear that a Trajectory can exist before running some engine or creating a task for it. The Trajectory is not a result of a simulation action. Create a Task object Now, we want that this trajectory actually exists so we have to make it. This requires a Task object that knows to describe a simulation. Since Task objects are very flexible and can be complex there are helper functions (i.e. factories) to get these in an easy manner, like the ones we already created just before. Let's use the openmm engine to create an openmm task now. End of explanation """ task = trajectory.run() """ Explanation: As an alternative you can directly use the trajectory (which knows its engine) and call .run() End of explanation """ project.queue(task) # shortcut for project.tasks.add(task) """ Explanation: That's it, just take a trajectory description and turn it into a task that contains the shell commands and needed files, etc. 
Submit the task to the queue Finally we need to add this task to the things we want to be done. This is easy and only requires saving the task to the project. This is done to the project.tasks bundle and once it has been stored it can be picked up by any worker to execute it. End of explanation """ print project.files print project.trajectories """ Explanation: That is all we can do from here. To execute the tasks you need to run a worker using bash adaptivemdworker -l tutorial --verbose Once this is done, come back here and check your results. If you want you can execute the next cell which will block until the task has been completed. End of explanation """ project.close() """ Explanation: and close the project. End of explanation """
fja05680/pinkfish
examples/120.sell-short/strategy.ipynb
mit
import datetime import matplotlib.pyplot as plt import pandas as pd import pinkfish as pf # Format price data pd.options.display.float_format = '{:0.2f}'.format %matplotlib inline # Set size of inline plots '''note: rcParams can't be in same cell as import matplotlib or %matplotlib inline %matplotlib notebook: will lead to interactive plots embedded within the notebook, you can zoom and resize the figure %matplotlib inline: only draw static images in the notebook ''' plt.rcParams["figure.figsize"] = (10, 7) pf.DEBUG = False """ Explanation: sell-short-in-may-and-go-away see: https://en.wikipedia.org/wiki/Sell_in_May The reason for this example is to demonstrate short selling (algo), and short selling using adjust_percent function (algo2). algo - Sell short in May and go away, buy to cover in Nov algo2 - first trading day of the month, adjust position to 50% (Select the one you want to call in the Strategy.run() function End of explanation """ #symbol = '^GSPC' symbol = 'SPY' capital = 10000 start = datetime.datetime(2015, 10, 30) #start = datetime.datetime(*pf.SP500_BEGIN) end = datetime.datetime.now() """ Explanation: Some global data End of explanation """ class Strategy: def __init__(self, symbol, capital, start, end): self.symbol = symbol self.capital = capital self.start = start self.end = end self.ts = None self.tlog = None self.dbal = None self.stats = None def _algo(self): pf.TradeLog.cash = self.capital for i, row in enumerate(self.ts.itertuples()): date = row.Index.to_pydatetime() close = row.close; end_flag = pf.is_last_row(self.ts, i) shares = 0 # Buy to cover (at the open on first trading day in Nov) if self.tlog.shares > 0: if (row.month == 11 and row.first_dotm) or end_flag: shares = self.tlog.buy2cover(date, row.open) # Sell short (at the open on first trading day in May) else: if row.month == 5 and row.first_dotm: shares = self.tlog.sell_short(date, row.open) if shares > 0: pf.DBG("{0} SELL SHORT {1} {2} @ {3:.2f}".format( date, shares, 
self.symbol, row.open)) elif shares < 0: pf.DBG("{0} BUY TO COVER {1} {2} @ {3:.2f}".format( date, -shares, self.symbol, row.open)) # Record daily balance self.dbal.append(date, close) def _algo2(self): pf.TradeLog.cash = self.capital for i, row in enumerate(self.ts.itertuples()): date = row.Index.to_pydatetime() close = row.close; end_flag = pf.is_last_row(self.ts, i) shares = 0 # On the first day of the month, adjust short position to 50% if (row.first_dotm or end_flag): weight = 0 if end_flag else 0.5 self.tlog.adjust_percent(date, close, weight, pf.Direction.SHORT) # Record daily balance self.dbal.append(date, close) def run(self): self.ts = pf.fetch_timeseries(self.symbol) self.ts = pf.select_tradeperiod(self.ts, self.start, self.end, use_adj=True) # add calendar columns self.ts = pf.calendar(self.ts) self.tlog = pf.TradeLog(self.symbol) self.dbal = pf.DailyBal() self.ts, self.start = pf.finalize_timeseries(self.ts, self.start) # Pick either algo or algo2 self._algo() #self._algo2() self._get_logs() self._get_stats() def _get_logs(self): self.rlog = self.tlog.get_log_raw() self.tlog = self.tlog.get_log() self.dbal = self.dbal.get_log(self.tlog) def _get_stats(self): self.stats = pf.stats(self.ts, self.tlog, self.dbal, self.capital) """ Explanation: Define Strategy Class End of explanation """ s = Strategy(symbol, capital, start, end) s.run() s.rlog.head() s.tlog.head() s.dbal.tail() """ Explanation: Run Strategy End of explanation """ benchmark = pf.Benchmark(symbol, s.capital, s.start, s.end) benchmark.run() """ Explanation: Run Benchmark, Retrieve benchmark logs, and Generate benchmark stats End of explanation """ pf.plot_equity_curve(s.dbal, benchmark=benchmark.dbal) """ Explanation: Plot Equity Curves: Strategy vs Benchmark End of explanation """ pf.plot_trades(s.dbal, benchmark=benchmark.dbal) """ Explanation: Plot Trades End of explanation """ df = pf.plot_bar_graph(s.stats, benchmark.stats) df """ Explanation: Bar Graph: Strategy vs Benchmark End of 
explanation """
brianray/puppy_dec_2015
PuPPy Dec 2015-Parts of Speech in Python.ipynb
apache-2.0
sent = "Each of us is full of shit in our own special way" # setup display for demo %matplotlib inline import os os.environ['DISPLAY'] = 'localhost:1' """ Explanation: ``` First, let's analyze some text... ... ``` “Each of us is full of shit in our own special way. We are all shitty little snowflakes dancing in the universe.” ― Lewis Black, Me of Little Faith <img src="files/don.png"> Alice's Case <img src="files/plus.png" width="60%"> Overview of Taggers/Parsers Tagging and Parsing into Trees is different: Tagging: Tagging every word [fast] Parsing: Tagging and puts into Tree [slow] Chunking: Gives pieces of Trees [medium] POSH Rules: Special fact and deap and context aware [amazing] Other important words: Probabilistic Parsing Chart Parsing Grammer Strategy NLTK is the mother of all mother of NLP so many parsers: pyStatParser (python yay!, little slow, but fun) Stanford (popular) and btw, online! => http://nlp.stanford.edu:8080/parser/ TextBlob (python yay! NLTK simplification) clips Pattern (python yay!) MaltParser (java 1.8) spaCy (pyython yay!) Example Parsers/Taggers End of explanation """ from stat_parser import Parser parser = Parser() parser.parse(sent) tree = parser.parse(sent) # returns nltk Tree instance tree """ Explanation: pyStatParser End of explanation """ from textblob import TextBlob blob = TextBlob(sent) blob.parse() """ Explanation: TextBlob End of explanation """ import nltk mp = nltk.parse.malt.MaltParser(os.getcwd(), model_filename="engmalt.linear-1.7.mco") mp.parse_one(sent.split()).tree() """ Explanation: MaltParser End of explanation """ from pattern.en import parse, pprint s = parse(sent, tokenize = True, # Tokenize the input, i.e. split punctuation from words. tags = True, # Find part-of-speech tags. chunks = True, # Find chunk tags, e.g. "the black cat" = NP = noun phrase. relations = True, # Find relations between chunks. lemmata = True, # Find word lemmata. 
light = False) pprint(s) """ Explanation: Pattern End of explanation """ from spacy.en import English parser = English() parsedData = parser(unicode(sent)) for i, token in enumerate(parsedData): print("original:", token.orth, token.orth_) print("lowercased:", token.lower, token.lower_) print("lemma:", token.lemma, token.lemma_) print("shape:", token.shape, token.shape_) print("prefix:", token.prefix, token.prefix_) print("suffix:", token.suffix, token.suffix_) print("log probability:", token.prob) print("Brown cluster id:", token.cluster) print("----------------------------------------") if i > 1: break """ Explanation: spaCy End of explanation """ from visualize_word_graph import draw_graph draw_graph("dog") draw_graph("noise", hypernym=True) """ Explanation: <a href="https://api.spacy.io/displacy/index.html?full=Each of us is full of shit in our own special way. We are all shitty little snowflakes dancing in the universe." target="_new">Interactive Example</a> Word Langauge Graph End of explanation """ bad_sounds =['The sound in the place is terrible.', 'dining with clatter and the occasional smell of BMW exausts', 'Also, the acoustics are not conducive to having any sort of conversation.'] not_bad_sounds = ["not to sound like a snob", "at your table and you can tune the sound to whichever game you're interested in", "oh god I sound old!"] """ Explanation: Alice's Yelp Data End of explanation """ from pattern.en import parse, pprint def print_parts(sents): for sent in sents: s = parse(sent, tokenize = True, # Tokenize the input, i.e. split punctuation from words. tags = True, # Find part-of-speech tags. chunks = True, # Find chunk tags, e.g. "the black cat" = NP = noun phrase. relations = True, # Find relations between chunks. lemmata = True, # Find word lemmata. light = False) print sent pprint(s) sents = bad_sounds + not_bad_sounds print_parts(bad_sounds + not_bad_sounds) """ Explanation: 1. 
parts of speach for each End of explanation """ from pattern.en import parsetree from pattern.search import search for sent in sents: t = parsetree(sent) print print sent print "Tagged Sent:", t print "Verbs:", search('VB*', t) # verbs print "ADJP:", search('ADJP', t) # verbs print "Nouns:", search('NN', t) # all nouns """ Explanation: Penn Treebank Project Chunks <a href="tagguide.pdf">guide</a> parts <table class="border"> <tbody> <tr> <td><span class="smallcaps">Tag </span></td> <td><span class="smallcaps">Description </span></td> <td class="smallcaps">Example</td> </tr> <tr> <td><span class="postag">CC </span></td> <td>conjunction, coordinating</td> <td><em>and, or, but</em></td> </tr> <tr> <td><span class="postag">CD </span></td> <td>cardinal number</td> <td><em>five, three, 13%</em></td> </tr> <tr> <td><span class="postag">DT </span></td> <td>determiner</td> <td><em>the, a, these <br></em></td> </tr> <tr> <td><span class="postag">EX </span></td> <td>existential there</td> <td><em><span style="text-decoration: underline;">there</span> were six boys <br></em></td> </tr> <tr> <td><span class="postag">FW </span></td> <td>foreign word</td> <td><em>mais <br></em></td> </tr> <tr> <td><span class="postag">IN </span></td> <td>conjunction, subordinating or preposition</td> <td><em>of, on, before, unless <br></em></td> </tr> <tr> <td><span class="postag">JJ </span></td> <td>adjective</td> <td><em>nice, easy </em></td> </tr> <tr> <td><span class="postag">JJR </span></td> <td>adjective, comparative</td> <td><em>nicer, easier</em></td> </tr> <tr> <td><span class="postag">JJS </span></td> <td>adjective, superlative</td> <td><em>nicest, easiest <br></em></td> </tr> <tr> <td><span class="postag">LS </span></td> <td>list item marker</td> <td><em>&nbsp;</em></td> </tr> <tr> <td><span class="postag">MD </span></td> <td>verb, modal auxillary</td> <td><em>may, should <br></em></td> </tr> <tr> <td><span class="postag">NN </span></td> <td>noun, singular or mass</td> <td><em>tiger, 
chair, laughter <br></em></td> </tr> <tr> <td><span class="postag">NNS </span></td> <td>noun, plural</td> <td><em>tigers, chairs, insects <br></em></td> </tr> <tr> <td><span class="postag">NNP </span></td> <td>noun, proper singular</td> <td><em>Germany, God, Alice <br></em></td> </tr> <tr> <td><span class="postag">NNPS </span></td> <td>noun, proper plural</td> <td><em>we met two <span style="text-decoration: underline;">Christmases</span> ago <br></em></td> </tr> <tr> <td><span class="postag">PDT </span></td> <td>predeterminer</td> <td><em><span style="text-decoration: underline;">both</span> his children <br></em></td> </tr> <tr> <td><span class="postag">POS</span></td> <td>possessive ending</td> <td><em>'s</em></td> </tr> <tr> <td><span class="postag">PRP </span></td> <td>pronoun, personal</td> <td><em>me, you, it <br></em></td> </tr> <tr> <td><span class="postag">PRP&#36; </span></td> <td>pronoun, possessive</td> <td><em>my, your, our <br></em></td> </tr> <tr> <td><span class="postag">RB </span></td> <td>adverb</td> <td><em>extremely, loudly, hard&nbsp; <br></em></td> </tr> <tr> <td><span class="postag">RBR </span></td> <td>adverb, comparative</td> <td><em>better <br></em></td> </tr> <tr> <td><span class="postag">RBS </span></td> <td>adverb, superlative</td> <td><em>best <br></em></td> </tr> <tr> <td><span class="postag">RP </span></td> <td>adverb, particle</td> <td><em>about, off, up <br></em></td> </tr> <tr> <td><span class="postag">SYM </span></td> <td>symbol</td> <td><em>&#37; <br></em></td> </tr> <tr> <td><span class="postag">TO </span></td> <td>infinitival to</td> <td><em>what <span style="text-decoration: underline;">to</span> do? 
<br></em></td> </tr> <tr> <td><span class="postag">UH </span></td> <td>interjection</td> <td><em>oh, oops, gosh <br></em></td> </tr> <tr> <td><span class="postag">VB </span></td> <td>verb, base form</td> <td><em>think <br></em></td> </tr> <tr> <td><span class="postag">VBZ </span></td> <td>verb, 3rd person singular present</td> <td><em>she <span style="text-decoration: underline;">thinks </span><br></em></td> </tr> <tr> <td><span class="postag">VBP </span></td> <td>verb, non-3rd person singular present</td> <td><em>I <span style="text-decoration: underline;">think </span><br></em></td> </tr> <tr> <td><span class="postag">VBD </span></td> <td>verb, past tense</td> <td><em>they <span style="text-decoration: underline;">thought </span><br></em></td> </tr> <tr> <td><span class="postag">VBN </span></td> <td>verb, past participle</td> <td><em>a <span style="text-decoration: underline;">sunken</span> ship <br></em></td> </tr> <tr> <td><span class="postag">VBG </span></td> <td>verb, gerund or present participle</td> <td><em><span style="text-decoration: underline;">thinking</span> is fun <br></em></td> </tr> <tr> <td><span class="postag">WDT </span></td> <td><em>wh</em>-determiner</td> <td><em>which, whatever, whichever <br></em></td> </tr> <tr> <td><span class="postag">WP </span></td> <td><em>wh</em>-pronoun, personal</td> <td><em>what, who, whom <br></em></td> </tr> <tr> <td><span class="postag">WP$$</span></td> <td><em>wh</em>-pronoun, possessive</td> <td><em>whose, whosever <br></em></td> </tr> <tr> <td><span class="postag">WRB</span></td> <td><em>wh</em>-adverb</td> <td><em>where, when <br></em></td> </tr> <tr> <td><span class="postag">. 
</span></td> <td>punctuation mark, sentence closer</td> <td><em>.;?* <br></em></td> </tr> <tr> <td><span class="postag">, </span></td> <td>punctuation mark, comma</td> <td><em>, <br></em></td> </tr> <tr> <td><span class="postag">: </span></td> <td>punctuation mark, colon</td> <td><em>: <br></em></td> </tr> <tr> <td><span class="postag">( </span></td> <td>contextual separator, left paren</td> <td><em>( <br></em></td> </tr> <tr> <td><span class="postag">) </span></td> <td>contextual separator, right paren</td> <td><em>) <br></em></td> </tr> </tbody> </table> chunks <table class="border"> <tbody> <tr> <td><span class="smallcaps">Tag </span></td> <td><span class="smallcaps">Description </span></td> <td><span class="smallcaps">Words </span></td> <td><span class="smallcaps">Example </span></td> <td align="right">%</td> </tr> <tr> <td><span class="postag">NP </span></td> <td>noun phrase<span class="postag">&nbsp;</span></td> <td><span class="postag">DT</span>+<span class="postag">RB</span>+<span class="postag">JJ</span>+<span class="postag">NN</span> + <span class="postag">PR</span></td> <td><em>the strange bird</em></td> <td align="right">&nbsp;51</td> </tr> <tr> <td><span class="postag">PP </span></td> <td>prepositional phrase</td> <td><span class="postag">TO</span>+<span class="postag">IN </span></td> <td><em>in between</em></td> <td align="right">&nbsp;19</td> </tr> <tr> <td><span class="postag">VP&nbsp; </span></td> <td>verb phrase&nbsp;</td> <td><span class="postag">RB</span>+<span class="postag">MD</span>+<span class="postag">VB&nbsp; </span></td> <td><em>was looking<br></em></td> <td align="right">9</td> </tr> <tr> <td><span class="postag">ADVP</span></td> <td>adverb phrase</td> <td><span class="postag">RB</span></td> <td><em>also<br></em></td> <td align="right">&nbsp;6</td> </tr> <tr> <td><span class="postag">ADJP</span></td> <td>adjective phrase<span class="postag">&nbsp;</span></td> <td><span class="postag">CC</span>+<span class="postag">RB</span>+<span 
class="postag">JJ</span></td> <td><em>warm and cosy</em></td> <td align="right">&nbsp;3</td> </tr> <tr> <td><span class="postag">SBAR</span></td> <td>subordinating conjunction&nbsp;</td> <td><span class="postag">IN</span></td> <td><em><span style="text-decoration: underline;">whether</span> or not<br></em></td> <td align="right">3</td> </tr> <tr> <td><span class="postag">PRT </span></td> <td>particle</td> <td><span class="postag">RP</span></td> <td><em><span style="text-decoration: underline;">up</span> the stairs</em></td> <td align="right">&nbsp;1</td> </tr> <tr> <td><span class="postag">INTJ</span></td> <td>interjection</td> <td><span class="postag">UH</span></td> <td><em>hello</em><em><br></em></td> <td align="right">&nbsp;0</td> </tr> </tbody> </table> 2. seach for patterns End of explanation """ from nltk.corpus import wordnet as wn from pattern.en import parsetree from pattern.search import taxonomy, WordNetClassifier, search taxonomy.classifiers.append(WordNetClassifier()) def get_parts(word, pos, recursive=False): parts = [word, ] parts += taxonomy.children(word, pos=pos, recursive=recursive) parts += taxonomy.parents(word, pos=pos, recursive=recursive) return parts def word_search(t, word, pos): parts = get_parts(word, pos) results = search(pos, t) for result in results: # print result.string, parts if any(x in result.string.split() for x in parts): return True return False def run_a_rule(sent, word, pos): t = parsetree(sent) return word_search(t, word, pos) """ Explanation: 3. create similar word list (stemming + synsets) End of explanation """ print "1. 'sound' is a NN" print run_a_rule(sents[0], 'noise', 'NN') print "2. clatter is a NN" print run_a_rule(sents[1], 'noise', 'NN') print "3. acoustics is NNS and RB Not" print run_a_rule(sents[2], 'acoustics', 'NNS') and run_a_rule(sents[2], 'not', 'RB') print "4. sound is a VB" print run_a_rule(sents[3], 'noise', 'VB*') print "5. Sounds is JJ" print run_a_rule(sents[4], 'sound', 'JJ') print "6. 
sound is VBP" print run_a_rule(sents[5], 'noise', 'VB*') """ Explanation: 3. test End of explanation """ def ext_func(tgt): return bool(not (run_a_rule(tgt, 'noise', 'VB*') and not run_a_rule(tgt, 'sound', 'JJ')) and (run_a_rule(tgt, 'noise', 'NN') or run_a_rule(tgt, 'acoustics', 'NNS') or (run_a_rule(tgt, 'acoustics', 'NNS') and run_a_rule(tgt, 'not', 'RB')))) print "bad noises in review:" for sent in bad_sounds: print "\t" + sent assert(ext_func(sent) == True) print print "no mention of bad noises:" for sent in not_bad_sounds: print "\t" + sent assert(ext_func(sent) == False) """ Explanation: 4. create a feature extractor function End of explanation """ import zipfile import pickle from lxml import etree from StringIO import StringIO zf = zipfile.ZipFile('nhtsa_as_xml.zip', 'r') nhtsa_injured = zf.read('nhtsa_injured.xml') nhtsa_not_injured = zf.read('nhtsa_not_injured.xml') xml_injured = etree.parse(StringIO(nhtsa_injured)) xml_not_injured = etree.parse(StringIO(nhtsa_not_injured)) def injured(l): return ['0' != str(x) and 'injured' or 'notinjured' for x in l] def data(x): out = [x.xpath("//rows/row/@c1"), injured(x.xpath("//rows/row/@c8")), x.xpath("//rows/row/@c2")] return list(reversed(zip(*out))) xml_injured_data = data(xml_injured)[:800] xml_not_injured_data = data(xml_not_injured)[:800] xml_injured_data[0] from visualize_word_graph import draw_graph draw_graph("injury") import nltk.classify.util from nltk.classify import NaiveBayesClassifier from pattern.search import taxonomy, search taxonomy.append('dislocated', type='injury') taxonomy.append('sustained', type='injury') taxonomy.append('burn', type='injury') taxonomy.append('injury', type='hurt') def check_sustained(text): if len(search('HURT', text)) > 0: return True return False def feats(text): words = text.replace(".", "").split() out = dict([(word, True) for word in words]) if 'SUSTAINED' in out: del out['SUSTAINED'] out['rule(SUSTAINED)'] = check_sustained(text) return out negcutoff = 
len(xml_not_injured_data)*3/4 poscutoff = len(xml_injured_data)*3/4 not_inj_data = xml_not_injured_data[:negcutoff] + xml_injured_data[:poscutoff] inj_data = xml_not_injured_data[negcutoff:] + xml_injured_data[poscutoff:] negfeats = [(feats(f[2]), 'not') for f in not_inj_data] posfeats = [(feats(f[2]), 'injure') for f in inj_data] egcutoff = len(negfeats)*3/4 poscutoff = len(posfeats)*3/4 trainfeats = negfeats[:negcutoff] + posfeats[:poscutoff] testfeats = negfeats[negcutoff:] + posfeats[poscutoff:] print 'train on %d instances, test on %d instances' % (len(trainfeats), len(testfeats)) classifier = NaiveBayesClassifier.train(trainfeats) print 'accuracy:', nltk.classify.util.accuracy(classifier, testfeats) classifier.show_most_informative_features(n=100) classifier.classify(feats("HE SUSTAINED INJURY")) """ Explanation: Machine Learning Example End of explanation """
gwsb-istm-6212-fall-2016/syllabus-and-schedule
exercises/exercise-03.ipynb
cc0-1.0
NAME = "dchud" COLLABORATORS = "" """ Explanation: Before you turn this problem in, make sure everything runs as expected. First, restart the kernel (in the menubar, select Kernel$\rightarrow$Restart) and then run all cells (in the menubar, select Cell$\rightarrow$Run All). Make sure you fill in any place that says YOUR CODE HERE or "YOUR ANSWER HERE", as well as your name and collaborators below: End of explanation """ !wget --quiet -O boating.csv "https://data.ct.gov/api/views/mrb6-7ee5/rows.csv?accessType=DOWNLOAD" """ Explanation: Exercise 03 - Due Friday, September 30 at 12pm Objectives: Gain experience loading a CSV dataset into a database and using SQL to explore its contents. Write and execute a number of SQL queries using common syntax and functions. Grading criteria: The tasks should all be completed, and questions should all be answered with SQL queries in the space provided, unless a text answer is requested. Results should be correct, according to the embedded tests. The notebook itself should be completely reproducible; from start to finish, another person should be able to use the same code to obtain the same results as yours. Deadline: Friday, September 30, 12pm. Submit your notebook to Blackboard and push it to your GitHub repository. In this notebook we'll download a clean CSV dataset from data.gov, load it into a SQLite database, and perform a series of queries to answer several questions. For each problem, write and execute queries that provides the answer in the cells provided, with your SQL queries in the places marked. For each problem after executing your query, immediately execute the following test cell. If the tests complete without error, you have completed that question successfully. If errors arise, you might be missing something. Do not change the tests, just execute them as they are, and update or refine your query until the tests pass. 
For this assignment, you need not add narrative description to most of your queries (except where explicitly noted), although you may do so if something you see in the data prompts you. If you do, add new text cells and use Markdown formatting. Suggestion: if you have worked through the Software Carpentry SQL lessons and have run through the last two lecture notes notebooks, this should all be fairly easy. If you have done neither, do them now, before you begin. Setup - obtain data and create database The Connecticut DMV Boating Registrations dataset comprises several years of summary records. It is available from data.gov. First we download the dataset (note: it might take several seconds): End of explanation """ !head boating.csv | csvlook """ Explanation: Verify that it's what we think it is on the commandline: End of explanation """ !wc -l boating.csv """ Explanation: Looks right. How many records are there? End of explanation """ !csvstat boating.csv """ Explanation: So that should be 145, counting the header. And the basic stats: End of explanation """ !mv boating.csv boating-orig.csv """ Explanation: Looks about right! Note, though, that the column names have spaces, punctuation, and Upper Cased Names. That's annoying! First let's rename the header line. End of explanation """ !echo "year,tx_type,num" > boating.csv !tail -n +2 boating-orig.csv >> boating.csv !head boating.csv | csvlook """ Explanation: Okay, using output redirection and tail we can write a new header line. End of explanation """ !csvsql --db sqlite:///boating.db --insert boating.csv """ Explanation: Much easier to work with now. Next we convert the updated csv file into a SQLite database using CSVkit. End of explanation """ %load_ext sql """ Explanation: To work with it, we'll need the ipython-sql extension loaded, and then we'll need to connect to the db. 
End of explanation """ %sql sqlite:///boating.db %%sql SELECT COUNT(*) FROM boating; """ Explanation: Note if you see a pink box above with six lines of "ShimWarning" and "UserWarning", don't panic. This is just a warning message from the ipython-sql package. You are good to go. End of explanation """ %%sql SELECT * FROM boating LIMIT 10; """ Explanation: Looks like the same number of rows! We're good to go. Basic queries In the following queries, we'll do some basic exploration of the data. Let's first see what a few records look like. End of explanation """ %%sql SELECT * FROM boating WHERE tx_type = "BOAT CHANGE OF TYPE"; """ Explanation: This should look familiar! Let's look at just the "change" types. End of explanation """ %%sql SELECT COUNT(*) FROM boating WHERE tx_type = "BOAT CHANGE OF TYPE"; """ Explanation: How many is that, and which year had the most? End of explanation """ %%sql SELECT * FROM boating WHERE tx_type = "BOAT CHANGE OF TYPE" ORDER BY num DESC; """ Explanation: Which year had the most of these transactions? End of explanation """ %%sql SELECT year FROM boating; df = _.DataFrame() for y in [2008, 2009, 2010, 2011, 2012, 2013, 2014]: assert y in df.year.values assert len(df) == 7 """ Explanation: ...alright, your turn. Question 1 Use DISTINCT to determine the unique set of years in this dataset. End of explanation """ -- YOUR CODE HERE df = _.DataFrame() assert len(df) == 21 for tx_type in ["BOAT, DUPLICATE REGISTRATION", "REN BOAT PONTOON", "BOAT REG, CANOE"]: assert tx_type in df.tx_type.values """ Explanation: Question 2 Use DISTINCT to determine the unique set of transaction types in this dataset. 
End of explanation """ -- YOUR CODE HERE df = _.DataFrame() df.rename(columns=lambda x: "year" if x == "year" else "count", inplace=True) assert (df.loc[df['year'] == 2009]['count'] == 21).all() assert (df.loc[df['year'] == 2013]['count'] == 20).all() """ Explanation: Question 3 Use GROUP BY to determine the overall number of transactions (across all types) per year. End of explanation """ -- YOUR CODE HERE df = _.DataFrame() df.rename(columns=lambda x: "tx_type" if x == "tx_type" else "sum", inplace=True) assert (df.loc[df['tx_type'] == "BOAT REG, CANOE"]['sum'] == 1129).all() assert (df.loc[df['tx_type'] == "REN BOAT REGISTRATION"]['sum'] == 640790).all() """ Explanation: Question 4 Use SUM and GROUP BY to determine the overall number of transactions (across all years) per type. End of explanation """ -- YOUR CODE HERE df = _.DataFrame() df.rename(columns=lambda x: "tx_type" if x == "tx_type" else "sum_num", inplace=True) assert len(df) == 5 assert (df.loc[df['tx_type'] == "REN BOAT PONTOON"]['sum_num'] == 15556).all() assert (df.loc[df['tx_type'] == "REN BOAT REGISTRATION"]['sum_num'] == 640790).all() assert df['sum_num'].max() == 640790 assert df['sum_num'].min() == 2390 """ Explanation: Question 5 Use ORDER BY and LIMIT to determine the top five types of transactions overall. End of explanation """ -- YOUR CODE HERE df = _.DataFrame() assert len(df) == 5 assert (df.loc[df['tx_type'] == "REN BOAT PONTOON"]['num'] == 2118).all() assert (df.loc[df['tx_type'] == "BOAT REG, NEW"]['num'] == 12569).all() assert df['num'].max() == 94005 assert df['num'].min() == 555 """ Explanation: Question 6 Using ORDER BY and LIMIT again, what were the top five types of transactions in 2010? End of explanation """ -- YOUR CODE HERE df = _.DataFrame() assert df.values[0][0] in [7, 9] """ Explanation: Question 7 Use a wildcard search to determine how many transactions in 2012 were renewals. 
End of explanation """ -- YOUR CODE HERE df = _.DataFrame() assert df.values[0][0] == 14 """ Explanation: Question 8 How many transactions overall involve canoes? End of explanation """ -- YOUR CODE HERE df = _.DataFrame() assert df.values[0][0] in [12071, 14466] """ Explanation: Question 9 How many transactions in 2011 involved a new registration (as opposed to a renewal or change of type)? End of explanation """ -- YOUR CODE HERE """ Explanation: Question 10 How do the transaction trends over time involving pontoons compare to overall boating transaction activity? Discuss as appropriate, adding Markdown cells for your discussion after your exploratory queries. End of explanation """
ray-project/ray
doc/source/tune/examples/ax_example.ipynb
apache-2.0
# !pip install ray[tune] !pip install ax-platform==0.2.4 """ Explanation: Running Tune experiments with AxSearch In this tutorial we introduce Ax, while running a simple Ray Tune experiment. Tune’s Search Algorithms integrate with Ax and, as a result, allow you to seamlessly scale up a Ax optimization process - without sacrificing performance. Ax is a platform for optimizing any kind of experiment, including machine learning experiments, A/B tests, and simulations. Ax can optimize discrete configurations (e.g., variants of an A/B test) using multi-armed bandit optimization, and continuous/ordered configurations (e.g. float/int parameters) using Bayesian optimization. Results of A/B tests and simulations with reinforcement learning agents often exhibit high amounts of noise. Ax supports state-of-the-art algorithms which work better than traditional Bayesian optimization in high-noise settings. Ax also supports multi-objective and constrained optimization which are common to real-world problems (e.g. improving load time without increasing data use). Ax belongs to the domain of "derivative-free" and "black-box" optimization. In this example we minimize a simple objective to briefly demonstrate the usage of AxSearch with Ray Tune via AxSearch. It's useful to keep in mind that despite the emphasis on machine learning experiments, Ray Tune optimizes any implicit or explicit objective. Here we assume ax-platform==0.2.4 library is installed withe python version >= 3.7. To learn more, please refer to the Ax website. End of explanation """ import numpy as np import time import ray from ray import tune from ray.tune.suggest.ax import AxSearch """ Explanation: Click below to see all the imports we need for this example. You can also launch directly into a Binder instance to run this notebook yourself. Just click on the rocket symbol at the top of the navigation. End of explanation """ def landscape(x): """ Hartmann 6D function containing 6 local minima. 
    It is a classic benchmark for developing global optimization algorithms.
    """
    # Hartmann-6 coefficients: alpha weights the four exponential terms,
    # A holds the curvature parameters and P the locations of the basins.
    # NOTE(review): constants reproduced verbatim from the tutorial --
    # cross-check against a published Hartmann 6D reference before reuse.
    alpha = np.array([1.0, 1.2, 3.0, 3.2])
    A = np.array(
        [
            [10, 3, 17, 3.5, 1.7, 8],
            [0.05, 10, 17, 0.1, 8, 14],
            [3, 3.5, 1.7, 10, 17, 8],
            [17, 8, 0.05, 10, 0.1, 14],
        ]
    )
    P = 10 ** (-4) * np.array(
        [
            [1312, 1696, 5569, 124, 8283, 5886],
            [2329, 4135, 8307, 3736, 1004, 9991],
            [2348, 1451, 3522, 2883, 3047, 6650],
            [4047, 8828, 8732, 5743, 1091, 381],
        ]
    )
    # y = -sum_j alpha_j * exp(-sum_k A[j,k] * (x[k] - P[j,k])^2)
    y = 0.0
    for j, alpha_j in enumerate(alpha):
        t = 0
        for k in range(6):
            t += A[j, k] * ((x[k] - P[j, k]) ** 2)
        y -= alpha_j * np.exp(-t)
    return y
"""
Explanation: Let's start by defining a classic benchmark for global optimization.
The form here is explicit for demonstration, yet it is typically a black-box.
We artificially sleep for a bit (0.02 seconds) to simulate a long-running ML experiment.
This setup assumes that we're running multiple steps of an experiment and try to tune 6-dimensions of the x hyperparameter.
End of explanation
"""
def objective(config):
    # Evaluate the landscape for config["iterations"] steps, reporting the
    # objective value and the L2 norm of x back to Tune on every step.
    for i in range(config["iterations"]):
        # Assemble the 6-D point from the sampled hyperparameters x1..x6.
        # NOTE(review): the comprehension variable i shadows the loop
        # variable i; in Python 3 the comprehension scope is separate, so
        # timesteps_total still reports the outer counter -- but the
        # shadowing is confusing and worth renaming.
        x = np.array([config.get("x{}".format(i + 1)) for i in range(6)])
        tune.report(
            timesteps_total=i, landscape=landscape(x), l2norm=np.sqrt((x ** 2).sum())
        )
        # Simulate a long-running training step.
        time.sleep(0.02)
"""
Explanation: Next, our objective function takes a Tune config, evaluates the landscape of our experiment in a training loop, and uses tune.report to report the landscape back to Tune. 
End of explanation """ algo = AxSearch( parameter_constraints=["x1 + x2 <= 2.0"], outcome_constraints=["l2norm <= 1.25"], ) """ Explanation: Now we define the search algorithm from AxSearch. If you want to constrain your parameters or even the space of outcomes, that can be easily done by passing the argumentsas below. End of explanation """ algo = tune.suggest.ConcurrencyLimiter(algo, max_concurrent=4) """ Explanation: We also use ConcurrencyLimiter to constrain to 4 concurrent trials. End of explanation """ num_samples = 100 stop_timesteps = 200 # Reducing samples for smoke tests num_samples = 10 """ Explanation: The number of samples is the number of hyperparameter combinations that will be tried out. This Tune run is set to 1000 samples. You can decrease this if it takes too long on your machine, or you can set a time limit easily through stop argument in tune.run() as we will show here. End of explanation """ analysis = tune.run( objective, name="ax", metric="landscape", mode="min", search_alg=algo, num_samples=num_samples, config=search_space, stop={"timesteps_total": stop_timesteps} ) """ Explanation: Finally, we run the experiment to find the global minimum of the provided landscape (which contains 5 false minima). The argument to metric, "landscape", is provided via the objective function's tune.report. The experiment "min"imizes the "mean_loss" of the landscape by searching within search_space via algo, num_samples times or when "timesteps_total": stop_timesteps. This previous sentence is fully characterizes the search problem we aim to solve. With this in mind, notice how efficient it is to execute tune.run(). End of explanation """ print("Best hyperparameters found were: ", analysis.best_config) ray.shutdown() """ Explanation: And now we have the hyperparameters found to minimize the mean loss. End of explanation """
tensorflow/recommenders
docs/examples/sequential_retrieval.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2021 The TensorFlow Authors. End of explanation """ !pip install -q tensorflow-recommenders !pip install -q --upgrade tensorflow-datasets import os import pprint import tempfile from typing import Dict, Text import numpy as np import tensorflow as tf import tensorflow_datasets as tfds import tensorflow_recommenders as tfrs """ Explanation: Recommending movies: retrieval using a sequential model <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/recommenders/examples/sequential_retrieval"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/recommenders/blob/main/docs/examples/sequential_retrieval.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/recommenders/blob/main/docs/examples/sequential_retrieval.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/recommenders/docs/examples/sequential_retrieval.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> In this tutorial, we are going to build a sequential retrieval model. 
Sequential recommendation is a popular model that looks at a sequence of items that users have interacted with previously and then predicts the next item. Here the order of the items within each sequence matters, so we are going to use a recurrent neural network to model the sequential relationship. For more details, please refer to this GRU4Rec paper. Imports First let's get our dependencies and imports out of the way. End of explanation """ !wget -nc https://raw.githubusercontent.com/tensorflow/examples/master/lite/examples/recommendation/ml/data/example_generation_movielens.py !python -m example_generation_movielens --data_dir=data/raw --output_dir=data/examples --min_timeline_length=3 --max_context_length=10 --max_context_movie_genre_length=10 --min_rating=2 --train_data_fraction=0.9 --build_vocabs=False """ Explanation: Preparing the dataset Next, we need to prepare our dataset. We are going to leverage the data generation utility in this TensorFlow Lite On-device Recommendation reference app. MovieLens 1M data contains ratings.dat (columns: UserID, MovieID, Rating, Timestamp), and movies.dat (columns: MovieID, Title, Genres). The example generation script download the 1M dataset, takes both files, only keep ratings higher than 2, form user movie interaction timelines, sample activities as labels and 10 previous user activities as the context for prediction. 
End of explanation """ train_filename = "./data/examples/train_movielens_1m.tfrecord" train = tf.data.TFRecordDataset(train_filename) test_filename = "./data/examples/test_movielens_1m.tfrecord" test = tf.data.TFRecordDataset(test_filename) feature_description = { 'context_movie_id': tf.io.FixedLenFeature([10], tf.int64, default_value=np.repeat(0, 10)), 'context_movie_rating': tf.io.FixedLenFeature([10], tf.float32, default_value=np.repeat(0, 10)), 'context_movie_year': tf.io.FixedLenFeature([10], tf.int64, default_value=np.repeat(1980, 10)), 'context_movie_genre': tf.io.FixedLenFeature([10], tf.string, default_value=np.repeat("Drama", 10)), 'label_movie_id': tf.io.FixedLenFeature([1], tf.int64, default_value=0), } def _parse_function(example_proto): return tf.io.parse_single_example(example_proto, feature_description) train_ds = train.map(_parse_function).map(lambda x: { "context_movie_id": tf.strings.as_string(x["context_movie_id"]), "label_movie_id": tf.strings.as_string(x["label_movie_id"]) }) test_ds = test.map(_parse_function).map(lambda x: { "context_movie_id": tf.strings.as_string(x["context_movie_id"]), "label_movie_id": tf.strings.as_string(x["label_movie_id"]) }) for x in train_ds.take(1).as_numpy_iterator(): pprint.pprint(x) """ Explanation: Here is a sample of the generated dataset. 
0 : { features: { feature: { key : "context_movie_id" value: { int64_list: { value: [ 1124, 2240, 3251, ..., 1268 ] } } } feature: { key : "context_movie_rating" value: { float_list: {value: [ 3.0, 3.0, 4.0, ..., 3.0 ] } } } feature: { key : "context_movie_year" value: { int64_list: { value: [ 1981, 1980, 1985, ..., 1990 ] } } } feature: { key : "context_movie_genre" value: { bytes_list: { value: [ "Drama", "Drama", "Mystery", ..., "UNK" ] } } } feature: { key : "label_movie_id" value: { int64_list: { value: [ 3252 ] } } } } } You can see that it includes a sequence of context movie IDs, and a label movie ID (next movie), plus context features such as movie year, rating and genre. In our case we will only be using the sequence of context movie IDs and the label movie ID. You can refer to the Leveraging context features tutorial to learn more about adding additional context features. End of explanation """ movies = tfds.load("movielens/1m-movies", split='train') movies = movies.map(lambda x: x["movie_id"]) movie_ids = movies.batch(1_000) unique_movie_ids = np.unique(np.concatenate(list(movie_ids))) """ Explanation: Now our train/test datasets include only a sequence of historical movie IDs and a label of next movie ID. Note that we use [10] as the shape of the features during tf.Example parsing because we specify 10 as the length of context features in the example generateion step. We need one more thing before we can start building the model - the vocabulary for our movie IDs. 
End of explanation """ embedding_dimension = 32 query_model = tf.keras.Sequential([ tf.keras.layers.StringLookup( vocabulary=unique_movie_ids, mask_token=None), tf.keras.layers.Embedding(len(unique_movie_ids) + 1, embedding_dimension), tf.keras.layers.GRU(embedding_dimension), ]) candidate_model = tf.keras.Sequential([ tf.keras.layers.StringLookup( vocabulary=unique_movie_ids, mask_token=None), tf.keras.layers.Embedding(len(unique_movie_ids) + 1, embedding_dimension) ]) """ Explanation: Implementing a sequential model In our basic retrieval tutorial, we use one query tower for the user, and the candidate tow for the candidate movie. However, the two-tower architecture is generalizble and not limited to <user,item> pair. You can also use it to do item-to-item recommendation as we note in the basic retrieval tutorial. Here we are still going to use the two-tower architecture. Specificially, we use the query tower with a Gated Recurrent Unit (GRU) layer to encode the sequence of historical movies, and keep the same candidate tower for the candidate movie. End of explanation """ metrics = tfrs.metrics.FactorizedTopK( candidates=movies.batch(128).map(candidate_model) ) task = tfrs.tasks.Retrieval( metrics=metrics ) class Model(tfrs.Model): def __init__(self, query_model, candidate_model): super().__init__() self._query_model = query_model self._candidate_model = candidate_model self._task = task def compute_loss(self, features, training=False): watch_history = features["context_movie_id"] watch_next_label = features["label_movie_id"] query_embedding = self._query_model(watch_history) candidate_embedding = self._candidate_model(watch_next_label) return self._task(query_embedding, candidate_embedding, compute_metrics=not training) """ Explanation: The metrics, task and full model are defined similar to the basic retrieval model. 
End of explanation
"""
model = Model(query_model, candidate_model)
# Adagrad with learning_rate=0.1; presumably chosen to match the basic
# retrieval tutorial's setup -- confirm there.
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1))

# Shuffle once, batch large, and cache so the TFRecord parsing/mapping
# work is only paid on the first pass over each dataset.
cached_train = train_ds.shuffle(10_000).batch(12800).cache()
cached_test = test_ds.batch(2560).cache()

model.fit(cached_train, epochs=3)

# Top-K retrieval metrics are only computed here: compute_loss passes
# compute_metrics=not training, so training skips the expensive metric.
model.evaluate(cached_test, return_dict=True)
"""
Explanation: Fitting and evaluating
We can now compile, train and evaluate our sequential retrieval model.
End of explanation
"""
AnyBody-Research-Group/AnyPyTools
docs/Tutorial/01_Getting_started_with_anypytools.ipynb
mit
from anypytools import AnyPyProcess app = AnyPyProcess() """ Explanation: Getting Started with AnyPyTools Running a simple macro <img src="Tutorial_files/knee.gif" alt="Drawing" align="Right" width=120 /> For the sake of the tutorial we will use a small 'toy' model of a simplified knee joint (see the figure.) The model is defined in the file Knee.any, which is placed in the current working directory. Next, let us run the model from python. First, we import the AnyPyProcess class and create an instance of the class. End of explanation """ macrolist = [ 'load "Knee.any"', 'operation Main.MyStudy.Kinematics', 'run', ] app.start_macro(macrolist); """ Explanation: Next, we need to instruct the AnyBody Modelling System to load the and run the model. We do this using AnyScript macro commands. These are short commands that can automate operations in the AnyBody Modeling System (AMS). Operation that are normally done by pointing and clicking in the AMS graphical user interface. You can read more on AnyScript macros in the "User Interface Features" tutorial that accompanies the AnyBody Modeling System. Now we define an AnyScript macro that we want to run on the model. load "Knee.any" operation Main.MyStudy.Kinematics run The macro will command AnyBody to load the model and run the Kinematics operation. The macro is executed by parsing it to the start_macro() method of the AnyPyProcess object. End of explanation """ macrolist = [ ['load "Knee.any"', 'operation Main.MyStudy.Kinematics', 'run'], ['load "Knee.any"', 'operation Main.MyStudy.InverseDynamics', 'run'], ] app.start_macro(macrolist); """ Explanation: Running multiple macros It is easy to run multiple macros by adding an extra set of macro commands to the macro list. 
End of explanation """ macrolist = [] for i in range(40): macro = [ 'load "Knee.any"', 'operation Main.MyStudy.InverseDynamics', 'run', ] macrolist.append(macro) """ Explanation: Parallel execution Notice that AnyPyProcess will run the anyscript macros in parallel. Modern computers have multiple cores, but a single AnyBody instance can only utilize a single core, leaving us with a great potential for speeding things up through parallelization. To test this, let us create ten macros in a for-loop. End of explanation """ # First sequentially app = AnyPyProcess(num_processes = 1) app.start_macro(macrolist); # Then with parallization app = AnyPyProcess(num_processes = 4) app.start_macro(macrolist); """ Explanation: AnyPyProcess has a parameter 'num_processes' that controls the number of parallel processes. Let us try a small example to see the difference in speed: End of explanation """ import numpy as np macrolist = [ 'load "Knee.any"', 'operation Main.MyStudy.InverseDynamics', 'run', 'classoperation Main.MyStudy.Output.MaxMuscleActivity "Dump"', ] results = app.start_macro(macrolist) """ Explanation: Note: In general you should not user a num_processes larger than the number of cores in your computer. Getting data from the AnyBody Model In the following macro, we have added a new class operation to 'Dump' the result of the maximum muscle activity. The start_macro method will return all the dumped variables: End of explanation """ max_muscle_act = results[0]['Main.MyStudy.Output.MaxMuscleActivity'] import numpy as np import matplotlib.pyplot as plt %matplotlib inline plt.plot(max_muscle_act); """ Explanation: We can export more variables by adding more classoperation. But there is a better way of doing this, as we shall see in the next tutorials. Finally, to make a plot we import the matplotlib library, and enable inline figures. End of explanation """