repo_name
stringlengths
6
77
path
stringlengths
8
215
license
stringclasses
15 values
content
stringlengths
335
154k
statkraft/shyft-doc
notebooks/grid-pp/kalman_updating.ipynb
lgpl-3.0
# first you should import the third-party python modules which you'll use later on # the first line enables that figures are shown inline, directly in the notebook %matplotlib inline import datetime import numpy as np import os from os import path import sys # once the shyft_path is set correctly, you should be able to import shyft modules from shyft import api from matplotlib import pyplot as plt """ Explanation: This notebook sets up a Kalman filter and demonstrates how choice of parameters impact the updating 1. Loading required python modules and setting path to SHyFT installation End of explanation """ # Create the Kalman filter having 8 samples spaced every 3 hours to represent a daily periodic pattern p=api.KalmanParameter() kf = api.KalmanFilter(p) # Check default parameters print(kf.parameter.n_daily_observations) print(kf.parameter.hourly_correlation) print(kf.parameter.covariance_init) print(kf.parameter.ratio_std_w_over_v) print(kf.parameter.std_error_bias_measurements) # help(kf) # help(api.KalmanState) """ Explanation: 2. Create Kalman filter End of explanation """ # Create initial states s=kf.create_initial_state() # Check systemic variance matrix and default start states # Note that coefficients in matrix W are of form # (std_error_bias_measurements*ratio_std_w_over_v)^2*hourly_correlation^|i-j|, # while coeffisients of P are of same form but with different variance. print(np.array_str(s.W, precision=4, suppress_small=True)) print("") print(np.array_str(s.P, precision=4, suppress_small=True)) print("") print(np.array_str(s.x, precision=4, suppress_small=True)) print(np.array_str(s.k, precision=4, suppress_small=True)) """ Explanation: 3. 
Creates start states End of explanation """ # Create a bias temperature observation series with some noise, update the filter, # and for each update save the states belonging to the first segment of the day number_days = 10 observation = np.empty((8*number_days,1)) kalman_gain = np.empty((8*number_days,1)) std_P = np.empty((8*number_days,1)) learning = np.empty((8*number_days,1)) for i in range(len(observation)): obs_bias = 2 + 0.3*np.random.randn() # Expected bias = 2 with noise kf.update(obs_bias,api.deltahours(i*3),s) std_P[i] = pow(s.P[0,0],0.5) # Values for hour 0000 UTC observation[i] = obs_bias kalman_gain[i] = s.k[0] # Values for hour 0000 UTC learning[i] = s.x[0] # Values for hour 0000 UTC # help(api.TimeSeries) """ Explanation: 4. Update filter End of explanation """ fig, ax = plt.subplots(figsize=(20,15)) ax.plot(observation, 'b', label = 'Observation') ax.plot(std_P, 'k', label = 'Std. P') ax.plot(kalman_gain, 'g', label = 'Kalman gain') ax.plot(learning, 'r', label = 'Kalman learning') ax.legend() ax.set_title('State development for first time segment', fontsize=20, fontweight = 'bold') """ Explanation: 5. Plot results End of explanation """ # Create new parameters and filter p_new = api.KalmanParameter() p_new.std_error_bias_measurements = 1 p_new.ratio_std_w_over_v = 0.10 kf_new = api.KalmanFilter(p_new) # Initial states s_new = kf_new.create_initial_state() # Update with same observation as above kalman_gain_new = np.empty((8*number_days,1)) std_P_new = np.empty((8*number_days,1)) learning_new = np.empty((8*number_days,1)) for i in range(len(observation)): kf_new.update(observation.item(i),api.deltahours(i*3),s_new) std_P_new[i] = pow(s_new.P[0,0],0.5) # Values for hour 0000 UTC kalman_gain_new[i] = s_new.k[0] # Values for hour 0000 UTC learning_new[i] = s_new.x[0] # Values for hour 0000 UTC # Plot results fig, ax = plt.subplots(figsize=(20,15)) ax.plot(observation, 'b', label = 'Observation') ax.plot(std_P, 'k', label = 'Std. 
P') ax.plot(kalman_gain, 'g', label = 'Kalman gain') ax.plot(learning, 'r', label = 'Kalman learning') ax.plot(std_P_new, 'k--', label = 'Std. P') ax.plot(kalman_gain_new, 'g--', label = 'Kalman gain') ax.plot(learning_new, 'r--', label = 'Kalman learning') ax.legend() ax.set_title('State development for first time segment', fontsize=20, fontweight = 'bold') """ Explanation: 6. Test to see how choice of parameters impact filter End of explanation """
ucsd-ccbb/Oncolist
notebooks/.ipynb_checkpoints/BasicCFNClusterSetup-checkpoint.ipynb
mit
import os import sys sys.path.append(os.getcwd().replace("notebooks", "cfncluster")) ## Input the AWS account access keys aws_access_key_id = "/**aws_access_key_id**/" aws_secret_access_key = "/**aws_secret_access_key**/" ## CFNCluster name your_cluster_name = "geo" ## The private key pair for accessing cluster. private_key = "/path/to/private_key.pem" ## If delete cfncluster after job is done. delete_cfncluster = False """ Explanation: <h1 align="center">Basic CFNCluster Setup</h1> <h3 align="center">Author: Guorong Xu (g1xu@ucsd.edu) </h3> <h3 align="center">2016-9-19</h3> The notebook is an example that tells you how to call API to install, configure CFNCluster package, create a cluster, and connect to the master node. Currently we only support Linux, Mac OS platforms. <font color='red'>Notice:</font> First step is to fill in the AWS account access keys and then follow the instructions to install CFNCluster package and create a cluster. End of explanation """ import CFNClusterManager CFNClusterManager.install_cfn_cluster() """ Explanation: 1. Install CFNCluster Notice: The CFNCluster package can be only installed on Linux box which supports pip installation. End of explanation """ import CFNClusterManager CFNClusterManager.upgrade_cfn_cluster() """ Explanation: 2. 
Upgrade CFNCluster End of explanation """ import CFNClusterManager ## Configure cfncluster settings CFNClusterManager.insert_access_keys(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key) CFNClusterManager.config_key_name(private_key) CFNClusterManager.config_instance_types(master_instance_type="m3.large", compute_instance_type="r3.2xlarge") CFNClusterManager.config_initial_cluster_size(initial_cluster_size="0") CFNClusterManager.config_spot_price(spot_price="0.7") CFNClusterManager.config_volume_size(volume_size="300") CFNClusterManager.config_ebs_snapshot_id(ebs_snapshot_id="snap-5faff708") CFNClusterManager.config_aws_region_name(aws_region_name="us-west-2") CFNClusterManager.config_post_install(post_install="s3://path/to/postinstall.sh") CFNClusterManager.config_vpc_subnet_id(master_subnet_id="subnet-00000000", vpc_id="vpc-00000000") CFNClusterManager.config_s3_resource(s3_read_resource="s3://bucket_name/", s3_read_write_resource="s3://bucket_name/") """ Explanation: 3. Configure CFNCluster To configure CFNCluster settings, you need to import the package CFNCluster. The below functions tell you how to insert AWS access keys, configure instance types, spot price and S3 resource. End of explanation """ CFNClusterManager.view_cfncluster_config() CFNClusterManager.list_cfn_cluster() """ Explanation: After you finish configuration, you can call the below function to double check if your settings are correct. Before you create a new cluster, you can check the current running clusters to avoid to use the different cluster name by call the below function. End of explanation """ master_ip_address = CFNClusterManager.create_cfn_cluster(cluster_name=your_cluster_name) """ Explanation: To create a new cluster, you need to set a cluster name and then call the below function. After the creation is complete, you will see the output information about your cluser IP address. 
End of explanation """ import ConnectionManager ssh_client = ConnectionManager.connect_master(hostname=master_ip_address, username="ec2-user", private_key_file=private_key) """ Explanation: 4. Manage cluster To manage your new created cluster, you need to import ConnectionManager. The ConnectionManager can create the connection to the master node, execute commands on the master node, transfer files to the master. To create a connection to the master node, you need to set the hostname, username and your private key file. The hostname IP address (MasterPublicIP) can be found when your cluster creation is complete. The private key file should be the same when you configure CFNCluster. End of explanation """ ConnectionManager.close_connection(ssh_client) """ Explanation: After the job is done, you can call the below function to close the connection. End of explanation """ import CFNClusterManager if delete_cfncluster == True: CFNClusterManager.delete_cfn_cluster(cluster_name=your_cluster_name) """ Explanation: To delete the cluster, you just need to set the cluster name and call the below function. End of explanation """
jphall663/GWU_data_mining
02_analytical_data_prep/src/py_part_2_winsorize.ipynb
apache-2.0
import pandas as pd # pandas for handling mixed data sets import numpy as np # numpy for basic math and matrix operations from scipy.stats.mstats import winsorize # scipy for stats and more advanced calculations """ Explanation: License Copyright (C) 2017 J. Patrick Hall, jphall@gwu.edu Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Simple winsorizing - Pandas, numpy, and scipy Imports End of explanation """ scratch_df = pd.DataFrame({'x1': pd.Series(np.random.choice(1000, 20))}) scratch_df """ Explanation: Create sample data set End of explanation """ scratch_df['x1_winsor'] = winsorize(scratch_df['x1'], limits=[0.1, 0.1]) scratch_df """ Explanation: Winsorize End of explanation """
bbartoldson/examples
pong/pong.ipynb
mit
import gym import numpy as np import tensorflow as tf from IPython import display import matplotlib.pyplot as plt import time config = tf.ConfigProto() #config.gpu_options.allow_growth = True %matplotlib inline """ Explanation: Pong-Playing TensorFlow Neural Network Import modules needed to train neural network in Pong environment End of explanation """ env = gym.make("Pong-v0") env.render(mode='rgb_array').shape env.reset() print(env.action_space) print(env.unwrapped.get_action_meanings()) top = 32 bottom = 195 left = 14 right = 146 downsampled_height = int(np.rint((bottom-top)/2)) downsampled_width = int(np.rint((right-left)/2)) input_dim = downsampled_height*downsampled_width def preprocess(img, reshape=False): #crop, grab only one channel, and downsample by factor of 2 img = img[top:bottom,left:right,0][::2,::2] #get rid of background color, 109 in first frame, 144 otherwise img[np.isin(img,[144,109])] = 0 img[img!=0] = 1 if not reshape: return img.astype(np.int).ravel() else: return img.astype(np.int) def reshape(img): return img.reshape(downsampled_height,downsampled_width).astype(np.int) #what color pixels are in this image? 
#print(list(zip(*np.unique(env.render(mode='rgb_array')[top:bottom,left:right,0],return_counts=1)))) #print(list(zip(*np.unique(env.render(mode='rgb_array')[top:bottom,left:right,0][::2,::2],return_counts=1)))) plt.subplots(2,3, figsize=(12,10)) plt.subplot(2,3,1) plt.title("The Atari Pong Game Screen") plt.imshow(env.reset()) plt.subplot(2,3,2) plt.title("Cropped, First Channel Only") plt.imshow(env.render(mode='rgb_array')[top:bottom,left:right,0]) plt.subplot(2,3,3) plt.title("Prior Plus Downsample") plt.imshow(env.render(mode='rgb_array')[top:bottom,left:right,0][::2,::2]) plt.subplot(2,3,4) plt.title("After a step, color scheme changes") plt.imshow(env.step(2)[0][top:bottom,left:right,0][::2,::2]) plt.subplot(2,3,5) plt.title("After Preprocessing Frame 1") plt.imshow(reshape(preprocess(env.reset()))) plt.subplot(2,3,6) plt.title("After Preprocessing Frame 2") plt.imshow(reshape(preprocess(env.step(2)[0]))) plt.show() #print(list(zip(*np.unique(env.render(mode='rgb_array')[top:bottom,left:right,0][::2,::2],return_counts=1)))) #print(list(zip(*np.unique(reshape(preprocess(env.reset())),return_counts=1)))) #print(list(zip(*np.unique(reshape(preprocess(env.render(mode='rgb_array'))),return_counts=1)))) """ Explanation: Investigate the environment and set up data preprocessing functions End of explanation """ sess = tf.InteractiveSession(config=config) x = tf.placeholder(tf.float32, shape=[None, input_dim]) advantage = tf.placeholder(tf.float32, shape=[None]) action_is_down = tf.placeholder(tf.float32, shape=[None]) h1_dim = 200 l1 = tf.layers.dense(x, h1_dim, activation=tf.nn.relu) #use tf.squeeze() to reshape this from [batch_size,1] to [batch_size] logit = tf.squeeze(tf.layers.dense(l1, 1)) sampled_action_negative_log_prob = tf.nn.sigmoid_cross_entropy_with_logits(logits=logit, labels=action_is_down) reward_weighted_neg_likelihood = advantage*sampled_action_negative_log_prob optimizer = tf.train.AdamOptimizer(learning_rate=1e-4) train = 
optimizer.minimize(tf.reduce_sum(reward_weighted_neg_likelihood)) saver = tf.train.Saver() tf.global_variables_initializer().run() """ Explanation: Define and initialize the neural network End of explanation """ class pong_agent: def clean_slate(self): self.wins = 0 self.games = 0 self.p_list = [] self.actions = [] self.frames = [] self.frame_changes = [] self.rewards = [] def make_batch(self, n_sets): self.clean_slate() for _ in range(n_sets): self.play_set() self.normalize_rewards() return self.frame_changes, self.actions, self.rewards def play_set(self): env.reset() done = 0 self.frames.append(preprocess(env.render(mode='rgb_array'))) self.frame_changes.append(self.frames[-1] - self.frames[-1]) while not done: done = self.play_point() def play_point(self): frames_played = 0 discount = 0.99 while True: prob, action, reward, new_frame, done = self.play_frame(self.frame_changes[-1]) self.p_list.append(prob) self.actions.append(action) frames_played+= 1 if not done: self.frames.append(new_frame) self.frame_changes.append(self.frames[-1] - self.frames[-2]) if reward: self.rewards+= [reward * discount**k for k in reversed(range(frames_played))] self.wins+= max(reward,0) self.games+= 1 break return done def play_frame(self, frame_change): p_down = 1/(1+np.exp(-sess.run(logit, feed_dict={x:np.array([frame_change])}))) #sample an action using p_down, 3=down, 2=up action = np.random.binomial(1, p_down) + 2 observation, reward, done = env.step(action)[:3] return p_down, action, reward, preprocess(observation), done def normalize_rewards(self): mean = np.mean(self.rewards) std_dev = np.std(self.rewards) self.rewards = (np.array(self.rewards)-mean)/std_dev """ Explanation: Set up an agent class that plays pong using actions chosen by the neural network in the active TensorFlow session End of explanation """ start = time.time() ratios = [] matches_per_batch = 10 epochs = 3001 agent = pong_agent() for i in range(epochs): #play Pong with the network, save frames and associated 
rewards frame_changes, actions, rewards = agent.make_batch(matches_per_batch) train.run(feed_dict={x:frame_changes, action_is_down:np.array(actions)==3, advantage:rewards}) ratios.append(agent.wins/agent.games*100) if i%10==0: print("{}: batch {} finished after {} hours".format(time.strftime('%X %x '), i, round((time.time()-start)/3600,2))) plt.title("Agent Quality over Time") plt.plot(range(1,i+2), ratios) plt.xlabel("Number of Updates") plt.ylabel("Percent of Games Won") plt.savefig("./pong_agent_quality") if i%100==0: #save out the neural network's weights here saver.save(sess, "./pong_agent.ckpt") """ Explanation: Train the agent for 3000 updates to reach >50% win rate This part takes a while. My setup processes the earlier batches at a rate of ~100 batches/hour. To monitor progress (the agent's win rate), I output at a .PNG plot every 10 batches. There's a visible shift in the win rate from ~2% to ~4% by batch 300. Later batches take longer to process because the agent is playing more games. To speed up and improve quality in the later stages of training, we could trim the number of games played (because each point takes more frames to complete as the agent gets better), and add a decaying learning rate, respectively. 
End of explanation """ sess.close() tf.reset_default_graph() agent = pong_agent() sess = tf.InteractiveSession(config=config) x = tf.placeholder(tf.float32, shape=[None, input_dim]) advantage = tf.placeholder(tf.float32, shape=[None]) action_is_down = tf.placeholder(tf.float32, shape=[None]) h1_dim = 200 l1 = tf.layers.dense(x, h1_dim, activation=tf.nn.relu) #use tf.squeeze() to reshape this from [batch_size,1] to [batch_size] logit = tf.squeeze(tf.layers.dense(l1, 1)) sampled_action_negative_log_prob = tf.nn.sigmoid_cross_entropy_with_logits(logits=logit, labels=action_is_down) reward_weighted_neg_likelihood = advantage*sampled_action_negative_log_prob optimizer = tf.train.AdamOptimizer(learning_rate=1e-4) train = optimizer.minimize(tf.reduce_sum(reward_weighted_neg_likelihood)) saver = tf.train.Saver() saver.restore(sess, "./pong_agent.ckpt") number_of_frames_to_play = 400 frame = env.reset() new_frame = np.zeros_like(frame) diff = np.zeros_like(preprocess(np.copy(frame))) img = plt.imshow(frame) for i in range(number_of_frames_to_play): action = agent.play_frame(diff)[1] new_frame = env.render(mode='rgb_array') diff = preprocess(np.copy(new_frame))-preprocess(frame) frame = new_frame img.set_data(new_frame) display.display(plt.gcf()) display.clear_output(wait=True) """ Explanation: Restore the saved variables and let the agent play Pong! 
End of explanation """ import moviepy.editor as mpy global frame, new_frame, diff frame = env.reset() #resetting a couple of pixels that turned black for some reason frame[0][:8]=frame[0][8] new_frame = np.copy(frame) diff = np.zeros_like(preprocess(np.copy(frame))) def make_frame(t): global frame, new_frame, diff action = agent.play_frame(diff)[1] new_frame = env.render(mode='rgb_array') new_frame[0][:8]=new_frame[0][8] diff = preprocess(np.copy(new_frame))-preprocess(np.copy(frame)) frame = new_frame return frame clip = mpy.VideoClip(make_frame, duration=25) clip.write_gif("Pong.gif",fps=15) """ Explanation: Save a GIF End of explanation """
amueller/nyu_ml_lectures
Grid Searches for Hyper Parameters.ipynb
bsd-2-clause
from sklearn.grid_search import GridSearchCV from sklearn.svm import SVC from sklearn.datasets import load_digits from sklearn.cross_validation import train_test_split digits = load_digits() X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target, random_state=0) """ Explanation: Grid Searches <img src="figures/grid_search_cross_validation.svg" width=100%> Grid-Search with build-in cross validation End of explanation """ import numpy as np param_grid = {'C': 10. ** np.arange(-3, 3), 'gamma' : 10. ** np.arange(-5, 0)} np.set_printoptions(suppress=True) print(param_grid) grid_search = GridSearchCV(SVC(), param_grid, verbose=3) """ Explanation: Define parameter grid: End of explanation """ grid_search.fit(X_train, y_train) grid_search.predict(X_test) grid_search.score(X_test, y_test) grid_search.best_params_ # We extract just the scores scores = [x.mean_validation_score for x in grid_search.grid_scores_] scores = np.array(scores).reshape(6, 5) plt.matshow(scores) plt.xlabel('gamma') plt.ylabel('C') plt.colorbar() plt.xticks(np.arange(5), param_grid['gamma']) plt.yticks(np.arange(6), param_grid['C']); """ Explanation: A GridSearchCV object behaves just like a normal classifier. End of explanation """ from sklearn.neighbors import KNeighborsClassifier # %load solutions/grid_search_k_neighbors.py """ Explanation: Nested Cross-validation in scikit-learn: Exercises Use GridSearchCV to adjust n_neighbors of KNeighborsClassifier. Visualize grid_search.grid_scores_. End of explanation """
AllenDowney/ProbablyOverthinkingIt
hierarchical.ipynb
mit
from __future__ import print_function, division from thinkbayes2 import Pmf, Suite from fractions import Fraction """ Explanation: Bayesian interpretation of medical tests This notebooks explores several problems related to interpreting the results of medical tests. Copyright 2016 Allen Downey MIT License: http://opensource.org/licenses/MIT End of explanation """ class Test(Suite): """Represents beliefs about a patient based on a medical test.""" def __init__(self, p, s, t, label='Test'): # initialize the prior probabilities d = dict(sick=p, notsick=1-p) super(Test, self).__init__(d, label) # store the parameters self.p = p self.s = s self.t = t # make a nested dictionary to compute likelihoods self.likelihood = dict(pos=dict(sick=s, notsick=t), neg=dict(sick=1-s, notsick=1-t)) def Likelihood(self, data, hypo): """ data: 'pos' or 'neg' hypo: 'sick' or 'notsick' """ return self.likelihood[data][hypo] """ Explanation: Medical tests Suppose we test a patient to see if they have a disease, and the test comes back positive. What is the probability that the patient is actually sick (that is, has the disease)? To answer this question, we need to know: The prevalence of the disease in the population the patient is from. Let's assume the patient is identified as a member of a population where the known prevalence is p. The sensitivity of the test, s, which is the probability of a positive test if the patient is sick. The false positive rate of the test, t, which is the probability of a positive test if the patient is not sick. Given these parameters, we can compute the probability that the patient is sick, given a positive test. Test class To do that, I'll define a Test class that extends Suite, so it inherits Update and provides Likelihood. The instance variables of Test are: p, s, and t: Copies of the parameters. d: a dictionary that maps from hypotheses to their probabilities. The hypotheses are the strings sick and notsick. 
likelihood: a dictionary that encodes the likelihood of the possible data values pos and neg under the hypotheses. End of explanation """ p = Fraction(1, 10) # prevalence s = Fraction(9, 10) # sensitivity t = Fraction(3, 10) # false positive rate test = Test(p, s, t) test.Print() """ Explanation: Now we can create a Test object with parameters chosen for demonstration purposes (most medical tests are better than this!): End of explanation """ test.likelihood """ Explanation: If you are curious, here's the nested dictionary that computes the likelihoods: End of explanation """ test.Update('pos') test.Print() """ Explanation: And here's how we update the Test object with a positive outcome: End of explanation """ class MetaTest(Suite): """Represents a set of tests with different values of `t`.""" def Likelihood(self, data, hypo): """ data: 'pos' or 'neg' hypo: Test object """ # the return value from `Update` is the total probability of the # data for a hypothetical value of `t` return hypo.Update(data) """ Explanation: The positive test provides evidence that the patient is sick, increasing the probability from 0.1 to 0.25. Uncertainty about t So far, this is basic Bayesian inference. Now let's add a wrinkle. Suppose that we don't know the value of t with certainty, but we have reason to believe that t is either 0.2 or 0.4 with equal probability. Again, we would like to know the probability that a patient who tests positive actually has the disease. As we did with the Red Die problem, we will consider several scenarios: Scenario A: The patients are drawn at random from the relevant population, and the reason we are uncertain about t is that either (1) there are two versions of the test, with different false positive rates, and we don't know which test was used, or (2) there are two groups of people, the false positive rate is different for different groups, and we don't know which group the patient is in. 
Scenario B: As in Scenario A, the patients are drawn at random from the relevant population, but the reason we are uncertain about t is that previous studies of the test have been contradictory. That is, there is only one version of the test, and we have reason to believe that t is the same for all groups, but we are not sure what the correct value of t is. Scenario C: As in Scenario A, there are two versions of the test or two groups of people. But now the patients are being filtered so we only see the patients who tested positive and we don't know how many patients tested negative. For example, suppose you are a specialist and patients are only referred to you after they test positive. Scenario D: As in Scenario B, we have reason to think that t is the same for all patients, and as in Scenario C, we only see patients who test positive and don't know how many tested negative. Scenario A We can represent this scenario with a hierarchical model, where the levels of the hierarchy are: At the top level, the possible values of t and their probabilities. At the bottom level, the probability that the patient is sick or not, conditioned on t. To represent the hierarchy, I'll define a MetaTest, which is a Suite that contains Test objects with different values of t as hypotheses. End of explanation """ q = Fraction(1, 2) t1 = Fraction(2, 10) t2 = Fraction(4, 10) test1 = Test(p, s, t1, 'Test(t=0.2)') test2 = Test(p, s, t2, 'Test(t=0.4)') metatest = MetaTest({test1:q, test2:1-q}) metatest.Print() """ Explanation: To update a MetaTest, we update each of the hypothetical Test objects. The return value from Update is the normalizing constant, which is the total probability of the data under the hypothesis. We use the normalizing constants from the bottom level of the hierarchy as the likelihoods at the top level. 
Here's how we create the MetaTest for the scenario we described: End of explanation """ metatest.Update('pos') """ Explanation: At the top level, there are two tests, with different values of t. Initially, they are equally likely. When we update the MetaTest, it updates the embedded Test objects and then the MetaTest itself. End of explanation """ metatest.Print() """ Explanation: Here are the results. End of explanation """ def MakeMixture(metapmf, label='mix'): """Make a mixture distribution. Args: metapmf: Pmf that maps from Pmfs to probs. label: string label for the new Pmf. Returns: Pmf object. """ mix = Pmf(label=label) for pmf, p1 in metapmf.Items(): for x, p2 in pmf.Items(): mix.Incr(x, p1 * p2) return mix """ Explanation: Because a positive test is more likely if t=0.4, the positive test is evidence in favor of the hypothesis that t=0.4. This MetaTest object represents what we should believe about t after seeing the test, as well as what we should believe about the probability that the patient is sick. Marginal distributions To compute the probability that the patient is sick, we have to compute the marginal probabilities of sick and notsick, averaging over the possible values of t. The following function computes this distribution: End of explanation """ predictive = MakeMixture(metatest) predictive.Print() """ Explanation: Here's the posterior predictive distribution: End of explanation """ def MakeMetaTest(p, s, pmf_t): """Makes a MetaTest object with the given parameters. p: prevalence s: sensitivity pmf_t: Pmf of possible values for `t` """ tests = {} for t, q in pmf_t.Items(): label = 'Test(t=%s)' % str(t) tests[Test(p, s, t, label)] = q return MetaTest(tests) def Marginal(metatest): """Extracts the marginal distribution of t. 
""" marginal = Pmf() for test, prob in metatest.Items(): marginal[test.t] = prob return marginal def Conditional(metatest, t): """Extracts the distribution of sick/notsick conditioned on t.""" for test, prob in metatest.Items(): if test.t == t: return test """ Explanation: After seeing the test, the probability that the patient is sick is 0.25, which is the same result we got with t=0.3. Two patients Now suppose you test two patients and they both test positive. What is the probability that they are both sick? To answer that, I define a few more functions to work with Metatests: End of explanation """ pmf_t = Pmf({t1:q, t2:1-q}) metatest = MakeMetaTest(p, s, pmf_t) metatest.Print() """ Explanation: MakeMetaTest makes a MetaTest object starting with a given PMF of t. Marginal extracts the PMF of t from a MetaTest. Conditional takes a specified value for t and returns the PMF of sick and notsick conditioned on t. I'll test these functions using the same parameters from above: End of explanation """ metatest = MakeMetaTest(p, s, pmf_t) metatest.Update('pos') metatest.Print() """ Explanation: Here are the results End of explanation """ Marginal(metatest).Print() """ Explanation: Same as before. Now we can extract the posterior distribution of t. End of explanation """ cond1 = Conditional(metatest, t1) cond1.Print() cond2 = Conditional(metatest, t2) cond2.Print() """ Explanation: Having seen one positive test, we are a little more inclined to believe that t=0.4; that is, that the false positive rate for this patient/test is high. 
And we can extract the conditional distributions for the patient: End of explanation """ MakeMixture(metatest).Print() """ Explanation: Finally, we can make the posterior marginal distribution of sick/notsick, which is a weighted mixture of the conditional distributions: End of explanation """ convolution = metatest + metatest convolution.Print() """ Explanation: At this point we have a MetaTest that contains our updated information about the test (the distribution of t) and about the patient that tested positive. Now, to compute the probability that both patients are sick, we have to know the distribution of t for both patients. And that depends on details of the scenario. In Scenario A, the reason we are uncertain about t is either (1) there are two versions of the test, with different false positive rates, and we don't know which test was used, or (2) there are two groups of people, the false positive rate is different for different groups, and we don't know which group the patient is in. So the value of t for each patient is an independent choice from pmf_t; that is, if we learn something about t for one patient, that tells us nothing about t for other patients. So if we consider two patients who have tested positive, the MetaTest we just computed represents our belief about each of the two patients independently. To compute the probability that both patients are sick, we can convolve the two distributions. End of explanation """ marginal = MakeMixture(metatest+metatest) marginal.Print() """ Explanation: Then we can compute the posterior marginal distribution of sick/notsick for the two patients: End of explanation """ marginal = MakeMixture(metatest) + MakeMixture(metatest) marginal.Print() """ Explanation: So in Scenario A the probability that both patients are sick is 1/16. 
As an aside, we could have computed the marginal distributions first and then convolved them, which is computationally more efficient: End of explanation """ from random import random def flip(p): return random() < p def generate_pair_A(p, s, pmf_t): while True: sick1, sick2 = flip(p), flip(p) t = pmf_t.Random() test1 = flip(s) if sick1 else flip(t) t = pmf_t.Random() test2 = flip(s) if sick2 else flip(t) yield test1, test2, sick1, sick2 """ Explanation: We can confirm that this result is correct by simulation. Here's a generator that generates random pairs of patients: End of explanation """ def run_simulation(generator, iters=100000): pmf_t = Pmf([0.2, 0.4]) pair_iterator = generator(0.1, 0.9, pmf_t) outcomes = Pmf() for i in range(iters): test1, test2, sick1, sick2 = next(pair_iterator) if test1 and test2: outcomes[sick1, sick2] += 1 outcomes.Normalize() return outcomes outcomes = run_simulation(generate_pair_A) outcomes.Print() """ Explanation: And here's a function that runs the simulation for a given number of iterations: End of explanation """ metatest1 = MakeMetaTest(p, s, pmf_t) metatest1.Update('pos') metatest1.Print() """ Explanation: As we increase iters, the probablity of (True, True) converges on 1/16, which is what we got from the analysis. Good so far! Scenario B In Scenario B, we have reason to believe the t is the same for all patients, but we are not sure what it is. So each time we see a positive test, we get some information about t for all patients. The first time we see positive test we do the same update as in Scenario A: End of explanation """ marginal = MakeMixture(metatest1) marginal.Print() """ Explanation: And the marginal distribution of sick/notsick is the same: End of explanation """ metatest2 = MakeMetaTest(p, s, Marginal(metatest1)) metatest2.Print() """ Explanation: Now suppose the second patient arrives. 
We need a new MetaTest that contains the updated information about the test, but no information about the patient other than the prior probability of being sick, p: End of explanation """ metatest2.Update('pos') metatest2.Print() """ Explanation: Now we can update this MetaTest with the result from the second test: End of explanation """ predictive = MakeMixture(metatest2) predictive.Print() """ Explanation: This distribution contains updated information about the test, based on two positive outcomes, and updated information about a patient who has tested positive (once). After seeing two patients with positive tests, the probability that t=0.4 has increased to 25/34, around 74%. For either patient, the probability of being sick is given by the marginal distribution from metatest2: End of explanation """ cond_t1 = Conditional(metatest2, t1) conjunction_t1 = cond_t1 + cond_t1 conjunction_t1.Print() """ Explanation: After two tests, the probability that the patient is sick is slightly lower than after one (4/17 is about 23.5%, compared to 25%). That's because the second positive test increases our belief that the false positive rate is high (t=0.4), which decreases our belief that either patient is sick. Now, to compute the probability that both are sick, we can't just convolve the posterior marginal distribution with itself, as we did in Scenario A, because the selection of t is not independent for the two patients. Instead, we have to make a weighted mixture of conditional distributions. If we know t=t1, we can compute the joint distribution for the two patients: End of explanation """ cond_t2 = Conditional(metatest2, t2) conjunction_t2 = cond_t2 + cond_t2 conjunction_t2.Print() """ Explanation: If we know that t=t1, the probability of sicksick is 0.111. 
And for t=t2: End of explanation """ posterior_t = Marginal(metatest2) posterior_t[t1] * conjunction_t1['sicksick'] + posterior_t[t2] * conjunction_t2['sicksick'] """ Explanation: If we know that t=t2, the probability of sicksick is 0.04. The overall probability of sicksick is the weighted average of these probabilities: End of explanation """ metapmf = Pmf() for t, prob in Marginal(metatest2).Items(): cond = Conditional(metatest2, t) conjunction = cond + cond metapmf[conjunction] = prob metapmf.Print() """ Explanation: 1/17 is about 0.0588, slightly smaller than in Scenario A (1/16, which is about 0.0667). To compute the probabilities for all four outcomes, I'll make a Metapmf that contains the two conditional distributions. End of explanation """ predictive = MakeMixture(metapmf) predictive.Print() """ Explanation: And finally we can use MakeMixture to compute the weighted averages of the posterior probabilities: End of explanation """ def generate_pair_B(p, s, pmf_t): while True: sick1, sick2 = flip(p), flip(p) t = pmf_t.Random() test1 = flip(s) if sick1 else flip(t) # Here's the difference # t = pmf_t.Random() test2 = flip(s) if sick2 else flip(t) yield test1, test2, sick1, sick2 """ Explanation: To confirm that this result is correct, I'll use the simuation again with a different generator: End of explanation """ outcomes = run_simulation(generate_pair_B) outcomes.Print() """ Explanation: The difference between Scenario A and Scenario B is the line I commented out. In Scenario B, we generate t once and it applies to both patients. End of explanation """ def generate_pair_A(p, s, pmf_t): while True: sick1, sick2 = flip(p), flip(p) t = pmf_t.Random() test1 = flip(s) if sick1 else flip(t) t = pmf_t.Random() test2 = flip(s) if sick2 else flip(t) yield test1, test2, sick1, sick2 """ Explanation: As iters increases, the results from the simulation converge on 1/17. 
Summary so far In summary: P(sick|pos) P(sicksick|pospos) Scenario A 1/4 = 25% 1/16 = 6.25% Scenario B 1/4 = 25% 1/17 ~= 5.88% If we are only interested in one patient at a time, Scenarios A and B are the same. But for collections of patients, they yield different probabilities. A real scenario might combine elements of A and B; that is, the false positive rate might be different for different people, and we might have some uncertainty about what it is. In that case, the most accurate probability for two patients might be anywhere between 1/16 and 1/17. Scenario C Scenario C is similar to Scenario A: we believe that the false positive rate t might be different for different people, or for different versions of the test. The difference is that in Scenario A we see all patients, sick or not, positive test or not. In Scenario C, we only see patients after they have tested positive, and we don't know how many tested negative. For example, if you are a specialist and patients are referred to you only if they test positive, Scenario C might be a good model of your situation. Before I analyze this scenario, I'll start with a simulation. As a reminder, here's a generator that generates pairs of patients in Scenario A: End of explanation """ def run_simulation(generator, iters=100000): pmf_t = Pmf([0.2, 0.4]) pair_iterator = generator(0.1, 0.9, pmf_t) outcomes = Pmf() for i in range(iters): test1, test2, sick1, sick2 = next(pair_iterator) if test1 and test2: outcomes[sick1, sick2] += 1 outcomes.Normalize() return outcomes """ Explanation: And here's the simulator that uses the generator to estimate the probability that two patients who test positive are both sick. End of explanation """ outcomes = run_simulation(generate_pair_A) outcomes.Print() """ Explanation: As we saw before, this probability converges on $1/16$. 
End of explanation
"""

def generate_pair_C(p, s, pmf_t):
    """Generate pairs of patients for Scenario C.

    As in Scenario A, t is drawn independently for each patient; the only
    difference is that a pair is yielded only when BOTH tests come back
    positive (the filtering happens here instead of in the simulator).
    """
    while True:
        sick1, sick2 = flip(p), flip(p)
        t = pmf_t.Random()
        test1 = flip(s) if sick1 else flip(t)
        t = pmf_t.Random()
        test2 = flip(s) if sick2 else flip(t)
        # here is the difference
        if test1 and test2:
            yield test1, test2, sick1, sick2
"""
Explanation: Now here's a generator that generates pairs of patients in Scenario C. The difference is that for each pair we check the outcome of the tests; if they are not both positive, we loop back and try again:
End of explanation
"""

outcomes = run_simulation(generate_pair_C)
outcomes.Print()
"""
Explanation: When we run it, it seems like the probability is still 1/16:
End of explanation
"""

def generate_patient_D(p, s, pmf_t):
    """Generate single patients for Scenario D.

    A value of t is drawn, then patients are generated with that same t
    until one tests positive; only the positive patient is yielded.
    """
    while True:
        # choose t
        t = pmf_t.Random()
        # generate patients until positive test
        while True:
            sick = flip(p)
            test = flip(s) if sick else flip(t)
            if test:
                yield test, sick
                break
"""
Explanation: If you examine the code, you see that the conditional in generate_pair_C makes no difference because it is redundant with the conditional in run_simulation. In Scenarios A and C, we filter out pairs if they are not both positive; it doesn't matter whether the filtering happens in the generator or the simulator. In fact, Scenarios A and C are identical. In both scenarios, when we see a patient with a positive test, we learn something about the patients (more likely to be sick) and something about the particular test applied to the patients (more likely to generate false positives). This is similar to what we saw in the Red Die problem. In Scenario C, the reddish die is more likely to produce a red outcome, so a red outcome provides evidence that we rolled the reddish die. However, that is not the case with Scenario D.

Scenario D

As a reminder, Scenario D is similar to B: we have reason to think that t is either 0.2 or 0.4 for everyone. The difference in Scenario D is that we only see patients if they test positive.
Here's a generator that generates single patients: End of explanation """ def run_single_simulation(generator, iters=100000): pmf_t = Pmf([0.2, 0.4]) iterator = generator(0.1, 0.9, pmf_t) outcomes = Pmf() for i in range(iters): test, sick = next(iterator) if test: outcomes[sick] += 1 outcomes.Normalize() return outcomes """ Explanation: And here's a simulator that counts the fraction of positive tests that turn out to be sick: End of explanation """ outcomes = run_single_simulation(generate_patient_D) outcomes.Print() """ Explanation: When we run the simulation, it doesn't look like it converges to 1/4 as it does in the other three scenarios. End of explanation """ metatest = MakeMetaTest(p, s, pmf_t) for hypo in metatest: hypo.Update('pos') """ Explanation: So how can we analyze this scenario? The key is to realize that, as in the Red Dice problem, if we roll until we get red, we don't learn anything about the die we rolled, and in this case, if we generate pairs until we get a positive test, we don't learn anything about t. The likelihood of the data (a positive test) is 1, regardless of t. We can compute the probablity the patient is sick by creating a MetaTest and updating only the lower level (the Test objects) but not the upper level (the distribution of t). End of explanation """ Marginal(metatest).Print() """ Explanation: After the update, the marginal distribution of t is unchanged: End of explanation """ Conditional(metatest, t1).Print() Conditional(metatest, t2).Print() """ Explanation: But the conditional probabilities have been updated: End of explanation """ MakeMixture(metatest).Print() """ Explanation: We can use MakeMixture to compute the weighted average of the conditional distributions. 
End of explanation
"""

def generate_pair_D(p, s, pmf_t):
    """Generate pairs of patients for Scenario D.

    A single value of t is drawn and SHARED by both patients; pairs are
    then regenerated (with that same t) until both tests are positive.
    """
    while True:
        t = pmf_t.Random()
        while True:
            sick1, sick2 = flip(p), flip(p)
            test1 = flip(s) if sick1 else flip(t)
            test2 = flip(s) if sick2 else flip(t)
            if test1 and test2:
                yield test1, test2, sick1, sick2
                break
"""
Explanation: So in Scenario D, a patient who tests positive has a probability of 4/15 of being sick, which is about 26.7%, and consistent with the simulation. That's a little higher than in the other three Scenarios, because we have less reason to think that t is high.

Scenario D, two patients

Now let's see what happens with two patients. Here's a generator that generates pairs of patients:
End of explanation
"""

outcomes = run_simulation(generate_pair_D, iters=1000000)
outcomes.Print()
"""
Explanation: And here's what we get when we run the simulation:
End of explanation
"""

def MixConjunctions(metatest):
    """Posterior joint distribution for two patients who share t.

    For each value of t, form the conjunction (joint outcome) of two
    patients conditioned on that t, weight it by the posterior
    probability of t, and return the weighted mixture.
    """
    metapmf = Pmf()
    for t, prob in Marginal(metatest).Items():
        cond = Conditional(metatest, t)
        conjunction = cond + cond
        metapmf[conjunction] = prob
    return MakeMixture(metapmf)
"""
Explanation: It looks like the probability that both patients are sick is higher than 1/16.
We can compute the result exactly using the posterior distribution and the same method we used in Scenario B, computing the mixture of two conjunctions: End of explanation """ MixConjunctions(metatest).Print() """ Explanation: Then we'll make a weighted mixture of the conjunctions: End of explanation """ def scenario_a(p, s, pmf_t): metatest = MakeMetaTest(p, s, pmf_t) metatest.Update('pos') single = MakeMixture(metatest) pair = single + single return single, pair single, pair = scenario_a(p, s, pmf_t) single.Print() pair.Print() def scenario_b(p, s, pmf_t): metatest1 = MakeMetaTest(p, s, pmf_t) metatest1.Update('pos') single = MakeMixture(metatest1) metatest2 = MakeMetaTest(p, s, Marginal(metatest1)) metatest2.Update('pos') pair = MixConjunctions(metatest2) return single, pair single, pair = scenario_b(p, s, pmf_t) single.Print() pair.Print() def scenario_d(p, s, pmf_t): metatest = MakeMetaTest(p, s, pmf_t) for hypo in metatest: hypo.Update('pos') single = MakeMixture(metatest) pair = MixConjunctions(metatest) return single, pair single, pair = scenario_d(p, s, pmf_t) single.Print() pair.Print() from sympy import symbols p, s, q, t1, t2 = symbols(['p', 's', 'q', 't1', 't2']) pmf_t = Pmf({t1:q, t2:1-q}) def PrintSymSuite(suite): for hypo, prob in suite.Items(): print(hypo, prob.simplify()) single, pair = scenario_b(p, s, pmf_t) PrintSymSuite(single) PrintSymSuite(pair) """ Explanation: In Scenario D, the probability that both patients are sick is 17/225, or about 0.0755, which is consistent with the simulation and, again, a little higher than in the other scenarios. 
In summary: P(sick|pos) P(sicksick|pospos) Scenario A 1/4 = 25% 1/16 = 6.25% Scenario B 1/4 = 25% 1/17 ~= 5.88% Scenario C 1/4 = 25% 1/16 = 6.25% Scenario D 4/15 ~= 26.7% 17/225 ~= 7.55% End of explanation """ p = 0.1 s = 0.9 q = 0.5 t1 = 0.2 t2 = 0.4 pmf_t = Pmf({t1:q, t2:1-q}) """ Explanation: Scenarios C and D In Scenario B, I assumed that we see all patients regardless of whether they are sick or not, test positive or not. In that case, when we see a positive test, it provides evidence that the false positive rate is high. As a result, as we see more patients, we get more and more confident about the value of t. I'll demonstrate this with a simulation. Here are the usual parameters: End of explanation """ def generate_patient_all(p, s, t): while True: sick = flip(p) test = flip(s) if sick else flip(t) yield 'pos' if test else 'neg' """ Explanation: And here's a generator that simulates patients for given parameters: End of explanation """ def run_simulation(p, s, pmf_t, iterator): metatest = MakeMetaTest(p, s, pmf_t) for i in range(100): data = next(iterator) metatest = MakeMetaTest(p, s, Marginal(metatest)) metatest.Update(data) Marginal(metatest).Print() """ Explanation: Now we can simulate a doctor who sees 100 patients and updates metatest each time. End of explanation """ t = 0.2 iterator = generate_patient_all(p, s, t) run_simulation(p, s, pmf_t, iterator) """ Explanation: If t is actually 0.2, the doctor eventually becomes convinced that t=0.2 End of explanation """ t = 0.4 iterator = generate_patient_all(p, s, t) run_simulation(p, s, pmf_t, iterator) """ Explanation: And if t is actually 0.4, the doctor eventually becomes convinced that t=0.4 End of explanation """ def generate_patient_posonly(p, s, t): while True: sick = flip(p) test = flip(s) if sick else flip(t) if test: yield 'pos' """ Explanation: So far, so good. But what if the doctor is a specialist who only sees patients after they have tested positive? 
Here's a generator that simulates this scenario. End of explanation """ t = 0.2 iterator = generate_patient_posonly(p, s, t) run_simulation(p, s, pmf_t, iterator) t = 0.4 iterator = generate_patient_posonly(p, s, t) run_simulation(p, s, pmf_t, iterator) """ Explanation: Now if the doctor applies the same logic as before, updating their belief about the test each time they see a positive test, they are quickly convinced that t is high, regardless of the actual value End of explanation """ def prob_pos(p, s, t, r): yes = p*s + (1-p) * t no = p * (1-s) + (1-p) * (1-t) return yes / (yes + no * r) """ Explanation: So that's not good. We have to figure out how to update our belief about t in this case. I'll define r as the referral rate for patients who test negative. If r=1, we see all patients, as in Scenarios A and B. If r=0 we only see patients who tests positive. If we know p, s, t, and r, we can compute the probability of seeing a patient with a positive test: End of explanation """ p = Fraction(1, 10) s = Fraction(9, 10) t = Fraction(3, 10) q = Fraction(1, 2) t1 = Fraction(2, 10) t2 = Fraction(4, 10) pmf_t = Pmf({t1:q, t2:1-q}) pp1 = prob_pos(p, s, t1, 1) pp2 = prob_pos(p, s, t2, 1) pp1, pp2 """ Explanation: Here are the probabilities of seeing a patient with a positive test for the two values of t: End of explanation """ pmf_t = Pmf({t1:q, t2:1-q}) pmf_t[t1] *= prob_pos(p, s, t1, r=1) pmf_t[t2] *= prob_pos(p, s, t2, r=1) pmf_t.Normalize() pmf_t.Print() """ Explanation: Since these probabilities are the likelihood of the data, we can use them to update our belief about t. Here's what we get with r=1. End of explanation """ pmf_t = Pmf({t1:q, t2:1-q}) pmf_t[t1] *= prob_pos(p, s, t1, 0) pmf_t[t2] *= prob_pos(p, s, t2, 0) pmf_t.Normalize() pmf_t.Print() """ Explanation: And that's consistent with what we saw in Scenarios A and B. But when r=0, we only see patients with positive test. 
The probability of the data is 1, regardless of t, so the data have no effect on our belief about t. End of explanation """ metatest = MakeMetaTest(p, s, pmf_t) for test in metatest: test.Update('pos') metatest.Print() """ Explanation: To compute the probability that the patient is sick, we can make a MetaTest and update the Test objects it contains, but we don't update the top level of the hierarchy. End of explanation """ predictive = MakeMixture(metatest) predictive.Print() """ Explanation: Now we can generate the predictive distribution as usual: End of explanation """ conjunction = predictive + predictive conjunction.Print() """ Explanation: To compute the probability that two patients who test positive are sick, we have to deal with two cases again. Scenario C If the value of t is independent for all patients, we just compute the convolution of the predictive distribution with itself. End of explanation """ metapmf = Pmf() for t, prob in Marginal(metatest).Items(): cond = Conditional(metatest, t) conjunction = cond + cond metapmf[conjunction] = prob metapmf.Print() MakeMixture(metapmf).Print() 16/255, 17/225 """ Explanation: Scenario D Or, if we think the value of t is the same for all patients, we have to use the same technique we used in Scenario B. End of explanation """
ivannz/study_notes
year_15_16/machine_learning_course/ensemble_practicum/xgboost/XGBoost.ipynb
mit
import time, os, re, zipfile import numpy as np, pandas as pd %matplotlib inline import matplotlib.pyplot as plt """ Explanation: eXtreme Gradient Boosting library (XGBoost) <center>An unfocused introduction by Ivan Nazarov</center> Import the main toolkit. End of explanation """ import sklearn as sk, xgboost as xg # from sklearn.model_selection import train_test_split from sklearn.cross_validation import train_test_split """ Explanation: Now import some ML stuff End of explanation """ random_state = np.random.RandomState( seed = 0x0BADC0DE ) """ Explanation: Mind the seed!! End of explanation """ df_train = pd.read_csv( zipfile.ZipFile( 'train.csv.zip' ).open( 'train.csv' ), index_col = 'id' ) X = np.asanyarray( df_train.drop( 'target', axis = 1 ) ) y = sk.preprocessing.LabelEncoder( ).fit_transform( df_train[ 'target' ] ) """ Explanation: Let's begin this introduction with usage examples. The demonstration uses the dataset, which was originally used in Otto Group Product Classification Challenge. We load the data directly from ZIP archives. End of explanation """ X_train, X_, y_train, y_ = train_test_split( X, y, test_size = 0.25, random_state = random_state ) X_valid, X_test, y_valid, y_test = train_test_split( X_, y_, test_size = 0.5, random_state = random_state ) """ Explanation: As usual do the train-test split. End of explanation """ clf_ = xg.XGBClassifier( n_estimators = 50, gamma = 1.0, max_depth = 1000, objective = "multi:softmax", nthread = -1, silent = False ) """ Explanation: scikit-learn interface Use scikit-learn compatible interface of XGBoost. End of explanation """ clf_.fit( X_train, y_train, eval_set = [ ( X_valid, y_valid ), ], verbose = True ) """ Explanation: Fit the a gradient boosted tree ensemble. End of explanation """ y_predict = clf_.predict( X_test ) y_score = clf_.predict_proba( X_test ) """ Explanation: Now let's validate. 
End of explanation """ pd.DataFrame( sk.metrics.confusion_matrix( y_test, y_predict ), index = clf_.classes_, columns = clf_.classes_ ) """ Explanation: Let's check out the confusuion matrix End of explanation """ fig = plt.figure( figsize = ( 16, 9 ) ) axis = fig.add_subplot( 111 ) axis.set_title( 'ROC-AUC (ovr) curves for the heldout dataset' ) axis.set_xlabel( "False positive rate" ) ; axis.set_ylabel( "True positive rate" ) axis.set_ylim( -0.01, 1.01 ) ; axis.set_xlim( -0.01, 1.01 ) for cls_ in clf_.classes_ : fpr, tpr, _ = sk.metrics.roc_curve( y_test, y_score[:, cls_], pos_label = cls_ ) axis.plot( fpr, tpr, lw = 2, zorder = cls_, label = "C%d" % ( cls_, ) ) axis.legend( loc = 'lower right', shadow = True, ncol = 3 ) """ Explanation: Let's plot one-vs-all ROC-AUC curves End of explanation """ train_dmat = xg.DMatrix( data = X_train, label = y_train, feature_names = None, feature_types = None ) test_dmat = xg.DMatrix( data = X_test, label = y_test ) """ Explanation: alternative interface Internally XGBoost relies heavily on a custom dataset format DMatrix. It is ... The interface, which is exposed into python has three capabilities: - load datasets in libSVM compatible format; - load SciPy's sparse matrices; - load Numpy's ndarrays. Let's load the train dataset using numpy interface : - data : the matrix of features $X$; - label : the observation labels $y$ (could be categorical or numeric); - missing : a vector of values that encode missing observations; - feature_names : the columns names of $X$; - feature_types : defines the python types of each column of $X$, in case of heterogeneous data; - weight : the vector of nonnegative weights of each observation in the dataset. 
End of explanation """ xgb_params = { 'bst:max_depth': 2, 'bst:eta': 1, 'silent': 1, 'objective': 'multi:softmax', 'num_class': 9, 'nthread': 2, 'eval_metric': 'auc' } """ Explanation: DMatrix exports several useful methods: - num_col() : returns the number of columns; - num_row() : gets the number of items; - save_binary( fname ) : saves the DMatrix object into a specified file. For a more detailed list, it is useful to have a look at the official manual Having dafined the datasets, it is the right time to initialize the booster. To this end one uses xgboost.Learner class. Among other parameters, its instance is initialized with a dictionary of parameters, which allows for a more flexible booster initialization. End of explanation """ xgbooster_ = xg.train( params = xgb_params, dtrain = train_dmat, num_boost_round = 10, evals = (), obj = None, feval = None, maximize = False, early_stopping_rounds = None, evals_result = None, verbose_eval = True, learning_rates = None, xgb_model = None ) """ Explanation: The xgboost.train class initalizes an appropriate booster, and then fits it on the provided train dataset. Besides the booster parameters and the train DMatrix , the class initializer accepts: - num_boost_round : and interger number of boosting iterations which is the number of trees in the final ensemble; - evals : a list of DMatrix validation datasets to be evaluated during training; - obj : a custom objective function; - feval : a custom evaluation function; - early_stopping_rounds : Activates early stopping, which checks every early_stopping_rounds round(s) if the validation error has decreased in order to continue training; - maximize : a flag, which determines if the objective (feval) should be maximized; - learning_rates : a schedule for learning rates for each boosting round or a function that calculates $\eta$, for the current round; - xgb_model : an XGB model (booster or file), the training of which is to be continued. 
End of explanation """ y_predict = xgbooster_.predict( test_dmat ) y_score = xgbooster_.predict( test_dmat, output_margin = True ) """ Explanation: The method xgboost.booster.update performs one iteration of gradinet boosting: - dtrain : DMatrix of train dataset; - iteration : the current iteration number; - fobj : a custom objective function touse for this update. The method xboost.booster.boost performs one iteration of boosting on the custom gradient statistics: - dtrain : the DMatrix dataset to operate on; - grad, hess : pair of lists of loss gradients and hessians, respectively, evaluated at each datapoint in dtrain. The method xgboost.booster.predict returns either the learned value, or the index of the target leaf. The parameters are : - data : a DMatrix object storing the input; - output_margin : a flag, determining, if raw untransformed margin values should be returned; - ntree_limit : limit the number of trees used for predicting (defaults to 0, which use all trees); - pred_leaf : determined wether the output should be a matrix of $(n, K)$ of predicted leaf indices, where $K$ is the number of trees in the ensemble. The returned result is a numpy ndarray. End of explanation """ pd.DataFrame( sk.metrics.confusion_matrix( y_test, y_predict ), index = clf_.classes_, columns = clf_.classes_ ) """ Explanation: Besides these methods xgboost.booster exports: load_model( fname ) and save_model( fname ). 
Let's check out the confusion matrix
vitojph/2016progpln
notebooks/1-Intro-Python.ipynb
mit
print('Esto es un mensaje') """ Explanation: Introducción a Python Vamos a hacer una pequeña introducción al lenguaje de programación Python. Para ello, me voy a apoyar principalmente en dos excelentes recursos para aprender Python online que siempre recomiendo: el curso de Python en CodeCademy. el curso Python for the Humanities de Folgert Karsdorp. ¿Qué es esto? Esto es un cuaderno de Jupyter compuesto de una serie de celdas de distintos tipos. Esta primera celda contiene texto formateado. Para ver el código (y editarlo, si es necesario), haz doble clic sobre ella y verás el código Markdown. La siguiente celda, por el contrario, contiene código en Python. Para ejecutarla y ver cómo funciona, selecciónala y pulsa ctrl-ENTER: End of explanation """ # NOTA: en Python, las líneas que comienzan con # son comentarios # El intérprete no las lee. Los humanos sí deberíamos leerlas :-) mivariable = 34 edad = 25 year = 1992 """ Explanation: Variables y tipos de datos En los lenguajes de programación, una variable es un tipo de identificador que almacena o al que se le asigna un determinado valor. Este valor puede ser de distinto tipo, como veremos más adelante. Para asignar valores a variables, utilizamos el signo = con la siguiente sintaxis: nombre_de_variable = valor Veamos un ejemplo: vamos a declarar distintas variables, asignándoles valores: End of explanation """ print(mivariable) print(year) print('mivariable') print('year') print('El niño come manzanas.') print(255666) # esta celda dará error, al menos mientras no declaremos la variable llamada otraVariable print(otraVariable) """ Explanation: En Python podemos utilizar como nombre de variable cualquier secuencia de caracteres alfanuméricos, siempre que comience por una letra del alfabeto y no sea una palabra reservada por el propio lenguaje (típicamente, nombres de funciones y palabras clave). 
Una vez que hemos asignado valores a nombres de variables, podemos utilizar o recuperar esos valores siempre que lo necesitemos. Para hacer una pequeña prueba, vamos a imprimir por pantalla el valor de las variables declaradas anteriormente. Para imprimir por pantalla, usamos la función print. End of explanation """ # números enteros (integer) hijos = 3 # números reales (float), siempre indicando los decimales con punto precio = 350.25 longitud = 1.5 # cadenas de texto (string), siempre entre comillas simples o dobles nombre = "Pedro" apellidos = 'Sanz Hernández' # valores booleanos (bool): solo pueden ser True o False, escritos tal cual sin comillas animal = True mineral = False # imprimimos algunos de ellos por pantalla print(hijos) print("Me llamo", nombre, apellidos) # fíjate cómo puedo imprimir más de una cosa con la función print print(precio) print(mineral) """ Explanation: En el ejemplo anterior, hemos declarado tres variables diferentes. Sin embargo, el tipo de dato que estábamos almacenando era el mismo: simples números enteros. Como hemos mencionado antes, las variables pueden almacenar distintos tipos de datos. Los principales que vamos a utilizar nosotros son: números enteros: 1, 4542, -38 números reales: 2.3, -0.00000034123, 10.0 cadenas de texto: hola, La niña come manzanas, to/TO be/VB or/CC not/RB to/TO be/VB valores booleanos: True, False Veamos cómo funcionan. End of explanation """ nombre = 'Ana' apellidos = 'Serrano de la Oca' print(nombre, apellidos) """ Explanation: Los valores que contienen las variabes pueden ser reasignados en cualquier momento. Eso sí, solo almacenan el último valor que hayamos asignado. Si reasignamos algunas de las variables declaradas anteriormente e imprimimos sus valores por pantalla, comprobamos que éstos han cambiado: End of explanation """ #suma print(5+6) # resta print(10-3) # multiplicación print(25*6) # división print(2558/800000) # potencias print(2**3) # módulo hace referencia al resto de la división 10/2. 
print(10%2) print(6+5+12*2) print((6+5+12)*2) print(6+5+(12*2)) """ Explanation: Operaciones aritméticas Podemos utilizar el intérprete de Python como una calculadora. Basta con introducir operaciones aritméticas como operadores que seguramente ya conoces: suma (+), resta (-), multiplicación (*), división (/), potencias (**) y módulo (%). Vamos a imprimir por pantalla el resultado de algunas operaciones. End of explanation """ # una suma sencilla suma = 9+5 print(suma) # calculamos el área de un cuadrado lado = 1.5 area = lado**2 # es equivalente a lado*lado print(area) # vamos a calcular el precio final de un producto cuyo valor es 19,95€ iva = 21 precio = 19.95 precio_final = precio + (precio * iva)/100 print(precio_final) # calculamos la edad actual de una persona ahora = 2016 fechaNacimiento = 1985 edad = ahora - fechaNacimiento print(edad) """ Explanation: Si retomamos lo aprendido anteriormente acerca de las variables, veremos que podemos declarar variables a partir de operaciones matemáticas, por ejemplo: End of explanation """ nombre = "Godofredo de Orléans" oracion = 'GREEN COLORLESS IDEAS SLEEP FURIOUSLY' parrafo = '''En un lugar de la Mancha, de cuyo nombre no quiero acordarme, no ha mucho tiempo que vivía un "hidalgo" de los de lanza en astillero, adarga antigua, rocín flaco y galgo corredor. Una olla de algo más vaca que carnero, salpicón las más noches, duelos y quebrantos los sábados, lentejas los viernes, algún palomino de añadidura los domingos, consumían las tres partes de su hacienda.''' """ Explanation: Gestión de cadenas (strings) Si nuestro objetivo es aprende a programar en Python para procesar texto, las cadenas de caracteres van a ser el principal tipo de dato que vamos a manejar. Como hemos visto antes, las cadenas se identifican en Python porque se declaran entre comillas dobles ("cadena") o simples ('cadena') o, si las cadenas son muy largas y ocupan más de una línea, entre triples pares de comillas. 
Algunos ejemplos de cadenas: End of explanation """ numero1 = 45852236 numero2 = '45852236' # podemos realizar operaciones aritméticas entre números print(numero1 + 1) # pero no entre cadenas y números: esto da un error print(numero2 + 1) # sin embargo, sí puedo utilizar el operador '+' para concatenar varias cadenas print(numero2 + '1') print('¡Hola' + ' ' + 'amigo!') print("otra", "vez " * 3) print("otra" + "vez" * 3) """ Explanation: ¡OJO! Podemos definir como valores de variables cadenas formadas por secuencias de números, siempre que se declaren entre comillas. Es muy importante entender la diferencia entre: End of explanation """ # guardamos en numero1 un entero numero1 = 45852236 # en numero2, guardamos el valor de numero1 convertido a cadena de texto con la función str() numero2 = str(numero1) # con la función type() imprimimos por pantalla el tipo de dato que guarda una variable print(type(numero1)) # es un entero: int print(type(numero2)) # es una cadena: str print("\nOtros tipos de datos son:") print(type(2.3)) print(type(True)) print(type("")) """ Explanation: Conversión ente cadenas y enteros Otra manera de generar cadenas de texto es utilizando la función str() con un argumento que no sea una cadena. Fíjate en los ejemplos: End of explanation """ # guardamos en numero1 una cadena numero1 = '45852236' # en numero2, guardamos el valor de numero1 convertido a entero con la función int() numero2 = int(numero1) # con la función type() imprimimos por pantalla el tipo de dato que guarda una variable print(type(numero1)) # es una cadena: str print(type(numero2)) # es un entero: int # esto no va a funcionar :-/ no podemos convertir cadenas a enteros otracosa = "hola amigo" print(int(otracosa)) """ Explanation: Para hacer el paso contrario, es decir, transformar cadenas en enteros, podemos utilizar la función int(). 
End of explanation """ print(len(nombre)) print(len(oracion)) print(len(parrafo)) print(len(nombre)) # devuelve la longitud (en número de caracteres) de una cadena print(oracion.lower()) # transforma una cadena a minúsculas (lowercas) print(oracion) print(parrafo.upper()) # transforma una cadena a mayúsculas (uppercas) print(parrafo) # print('AbCdEfGhIjKlMnÑoPqRsTuVwXyZ'.swapcase()) # transforma mayúsculas a minúsculas y viceversa # incluso funciona con las letras con diacríticos print("áéíóúüûÁÉÍÓÚÛÜ".swapcase()) """ Explanation: Recuerda, cualquier secuencia alfanumérica encerrada entre comillas será tratada como una cadena de caracteres (string) aunque esté formada únicamente por dígitos. Métodos sobre cadenas Existe una serie de funciones y metodos que podemos ejecutar sobre cadenas de texto y que nos van a permitir realizar operaciones y transformar dichas cadenas de distintas maneras, p. ej.: contar el número de caracteres de una cadena (len()) transformar la cadena a mayúsculas (.upper()) o minúsculas (.lower()) Para más info, consulta los métodos que podemos aplicar a las cadenas. End of explanation """ numeros = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] dias = ["lunes", "martes", "miércoles", "jueves", "viernes", "sábado", "domingo"] misCosas = [23.4, True, oracion.lower(), len(nombre)] listaVacia = [] # los imprimimos por pantalla print(numeros) print(dias) print(misCosas) print(listaVacia) print(type(numeros)) print(len(numeros)) print(len(misCosas)) """ Explanation: Listas Las listas de Python son una estructura de datos que almacenan una colección de diferentes tipos de información en forma de secuencia ordenada, bajo un solo nombre de variable. Las listas pueden guardar valores de cualquiera de los tipos que hemos visto anteriormente (cadenas, números y booleanos) e incluso las listas pueden incluir otras listas de manera anidada. 
Las listas se declaran especificando la colección de elementos entre corchetes y separando los elementos con comas: End of explanation """ print('el primer día de la semana es', dias[0]) print('el tercer día de la semana es ' + dias[2]) print('y el último es', dias[-1]) print('el tercer día de la semana es ' + dias[2] + ' y el sexto día es ' + dias[-2]) print('el tercer día de la semana es', dias[2], 'y el sexto día es', dias[-2]) # podemos utilizar los índices también para reasignar valores a los elementos de una lista # p. ej., cambiamos el primer valor de la lista misCosas misCosas[0] = 44 print(misCosas) # ¡OJO! si intentamos acceder a un índice que no existe, el intérprete nos dará error print(misCosas[8]) """ Explanation: Las listas son estructuras de datos ordenadas, lo que implica que podemos acceder a los elementos individuales a través de un índice (la posición que ocupa en la secuencia). El índice se especifica indicando el nombre de la lista y un entero entre corchetes: nombre_de_lista[índice]. ¡OJO! Los índices de las listas comienzan con 0, no con 1: el primer elemento es nombre_de_lista[0]. End of explanation """ # imprime los tres primeros elementos de la variable dias print(dias[:3]) # imprime elementos del 3º al 6ª de la variable números print(numeros[2:6]) # imprime los tres últimos números de la variable números print(numeros[-3:-1]) print(dias[3:5]) """ Explanation: Podemos acceder a porciones de una lista usando una notación especial en los corchetes del índice. nombre_de_lista[n:m] devuelve la secuencia de elementos de la lista que va desde el índice n hasta el m. 
End of explanation """ cadena = 'perrogatorana' print(cadena[:5]) print(cadena[5:9]) print(cadena[-4:]) # practica los índices en listas y en cadenas, es muy importante tener solutura pal1 = 'perro' pal2 = 'soles' pal3 = 'azul' pal4 = 'amigos' pal5 = 'cafés' pal6 = 'hola' print(pal1[-1] == 's') print(pal2[-1] == 's') print(pal3[-1] == 's') print(pal4[-1] == 's') print(pal5[-1] == 's') print(pal6[-1] == 's') print("---------------------") print(pal1.endswith('s')) print(pal2.endswith('s')) print(pal3.endswith('s')) print(pal4.endswith('s')) print(pal5.endswith('s')) print(pal6.endswith('s')) """ Explanation: Las cadenas, al igual que las listas, son estructuras ordenadas. No lo hemos mencionado antes, pero podemos utilizar esta misma notación de índices para acceder a elementos y a porciones de una cadena. End of explanation """ # creamos una lista vacía miLista = [] # ¿está realmente vacía? print(len(miLista)) # ¿qué posición ocupa "jueves" o "domingo" en la lista días print(dias.index('jueves')) print(dias.index('domingo')) # esto, sin embargo, dará error print(dias.index('DOMINGO')) miLista = [] # añadimos varios elementos nuevos y vemos cómo va aumentando la lista miLista.append(2) print(miLista) miLista.append('palabra') print(miLista) miLista.append(25**3) print(miLista) # añadimos como cuarto elemento otra lista de elementos: fíjate en la estructura del resultado miLista.append(numeros) print(miLista) print(len(miLista)) # ¿cuántos elementos tiene miLista? # estas dos líneas son equivalentes, pero tienen sutiles diferencias. 
# aquí, concatenamos con el símbolo + tres cadenas dentro de una misma intrucción print # uno de los elementos es originariamente un número entero, por eso hay que transformarlo antes con str print('la lista miLista tiene ' + str(len(miLista)) + ' elementos') # aquí, imprimimos tres elementos diferentes en una misma instrucción print print('la lista miLista tiene', len(miLista), 'elementos') print(miLista) # insertamos como segundo elemento otro nuevo valor miLista.insert(1, 'segundo elemento') print(miLista) miLista[1] = "second element" print(miLista) # más ejemplos con los métodos insert y append l = [] print(l) l.append('hola') print(l) l.append('adiós') print(l) l.insert(1, 'segundo elemento') print(l) l.insert(2, 'otra cosa') print(l) dias = ["lunes", "martes", "miércoles", "jueves", "viernes", "sábado", "domingo"] numeros = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] print(dias) # eliminarmos el elemento 2 y el elemento "palabra" de miLista dias.pop(2) print(dias) dias.remove('sábado') print(dias) letras = ['a', 'b', 'c', 'a'] print(letras.count('aaaaa')) letras.remove('a') print(letras) print(letras.count('a')) # también puedo eliminar el último elemento de la lista miLista.pop(-1) print(miLista) # más pruebas concatenando listas dias = ["lunes", "martes", "miércoles", "jueves", "viernes", "sábado", "domingo"] dias.extend(numeros) print(dias) # fíjate en que la lista numeros crece de manera indefinida numeros.extend(dias) print(numeros) """ Explanation: Métodos sobre listas Como con las cadenas, en las listas existen una serie de funciones y métodos que podemos ejecutar y que nos van a permitir realizar operaciones y transformar dichas listas de distintas maneras, p. ej.: podemos contar el número de elementos de una lista con la función len(). podemos buscar elementos en una lista sin saber la posición concreta que ocupan con el método .index(). podemos añadir nuevos elementos al final de una lista con el método .append(). 
podemos añadir nuevos elementos en una posición concreta de una lista con el método .insert(). podemos eliminar elementos de una lista con los métodos .remove() y .pop(). podemos concatenar una lista con otra con el método .extend(). Para más info, consulta los métodos que podemos aplicar a las listas. End of explanation """ # creo tres tripletas, cada una almacenando el nombre, apellido y sexo de unas personas ficticias tripleta1 = ("Carlos", "Pujol", "H") tripleta2 = ("Montse", "Santos", "M") tripleta3 = ("Ana", "Ruiz", "M") # creo una lista de personas y añado las tripletas por orden personas = [] personas.append(tripleta1) personas.append(tripleta2) personas.append(tripleta3) print(personas) # las tuplas son secuencias ordenadas, y como tales podemos acceder a sus elementos # a través de índices numéricos print(len(personas[0])) print(personas[1][1]) """ Explanation: Tuplas Las tuplas de Python son una estructura de datos que almacenan una colección de diferentes tipos de información en forma de secuencia ordenada. Estas tuplas pueden guardar valores de cualquiera de los tipos que hemos visto anteriormente (cadenas, números y booleanos) e incluso otras listas o tuplas de manera anidada. La principal diferencia con las listas es que las tuplas son inmutables: no podemos modificarlas, ni añadiendo o eliminando elementos, ni reordenado, etc. Las tuplas se declaran especificando la colección de elementos entre paréntesis y separando los elementos con comas. 
End of explanation """ # entre llaves, separando claves y valores con dos puntos, y separando pares con comas victor = {"nombre": "Victor", "apellido": "Peinado", "sexo": "H", "edad": 38} print(victor) # declarando el diccionario como una estructura vacía y añadiendo los pares clave:valor después antonio = {} antonio["nombre"] = "Antonio" antonio["apellido"] = "Santos" antonio["sexo"] = "H" antonio["edad"] = 26 print(antonio) # creo una lista de diccionarios: OJO, la lista sí está ordenada personas = [victor, antonio] print(personas) print(type(personas)) print(type(personas[-1])) print(type(personas[-1]["edad"])) """ Explanation: Diccionarios Los diccionarios de Python son una estructura de datos que almacena una colección de pares clave:valor. La clave es siempre una cadena, y el valor puede contener cualquiera de los tipos que hemos visto anteriormente (cadenas, números y booleanos) e incluso otras listas, tuplas o diccionarios de manera anidada. Al contrario que las listas y las tuplas, los diccionarios no son estructuras ordenadas. Por lo tanto, nos interesará utilizar diccionarios cuando necesitamos almacenar y acceder directamente a información cuyo nombre conozcamos. Podemos definir diccionarios de varias maneras: End of explanation """ print(victor['nombre']) print(antonio['apellido']) print(personas[1]['nombre']) """ Explanation: Podemos acceder a los elementos del diccionaro a través del nombre de la clave: End of explanation """ # ¿cuántos pares clave:valor tiene el diccionario antonio? 
print(len(antonio)) # dame las claves print(antonio.keys()) # dame los valores print(antonio.values()) # recuerda que para crear nuevos pares clave:valor, basta con asignarles un valor antonio['altura'] = 1.79 # podemos comprobar si el diccionario tiene determinadas claves print('email' in antonio) print('altura' in antonio) print(antonio) # eliminamos la clave altura de antonio antonio.pop('altura') print('altura' in antonio) # y esto ahora devuelve False print(antonio) """ Explanation: Métodos sobre diccionarios Como con otras estructuras de datos, los diccionarios proporcionan una serie de funciones y métodos que podemos ejecutar y que nos van a permitir manipular y acceder a dichos diccionarios de distintas maneras, p. ej.: podemos contar el número de elementos de una diccionario la función len(). podemos recuperar una lista conteniendo las claves de un diccionario con el método .keys(). podemos recuperar una lista conteniendo los valores de un diccionario con el método .values(). podemos eliminar un elemento de un diccionario con el método .pop(). Para más info, consulta los métodos que podemos aplicar a los diccionarios. End of explanation """ # creo un diccionario de la familia Pig, en principio vacío familiaPig = {} # creo varios diccionarios por cada miembro de la familia peppa = {"nombre": "Peppa", "apellido": "Pig", "email": "peppa@pig.com",} george = {"nombre": "George", "apellido": "Pig", "email": "george@pig.com",} daddy = {"nombre": "Daddy", "apellido": "Pig", "email": "daddy@pig.com",} mommy = {"nombre": "Mommy", "apellido": "Pig", "email": "mommy@pig.com",} familiaPig['hija'] = peppa familiaPig['hijo'] = george familiaPig['padre'] = daddy familiaPig['madre'] = mommy print(familiaPig) # ¿Cuál es el email de la madre? print(familiaPig["madre"]["email"]) # ¿Y el nombre de la hija? 
print(familiaPig["hija"]["nombre"]) # en lugar de un diccionario, puedo crear también una lista con los miembros de la familia ordenados por edad familia = [daddy, mommy, peppa, george] # ahora solo puedo acceder por índices, así que ¿cuál es el email del primer miembro de la familia? print(familia[0]["email"]) # ¿Y el nombre del miembro más joven? print(familia[-1]["nombre"]) # esta celda no hace nada, salvo insertar una imagen externa from IPython.core.display import Image Image('http://peppapigjuegos.com/img/wallpapers/2pag/peppa_pig_family.jpg') """ Explanation: Los diccionarios pueden contener otros diccionarios anidados, lo que conlleva que podemos manejar estructuras bastante complejas. A modo de ejemplo: End of explanation """ pelicula = {} pelicula['titulo'] = "The Arrival" pelicula['ano'] = 2016 pelicula['reparto'] = ["Amy Adams", "Jeremy Renner", "Forest Whitaker",] pelicula['esBuena'] = True print(pelicula) print("----------------------------") # imprime el título print("La película se titula", pelicula['titulo'], ".") # y más datos asociados a la peli print("La película tiene" + str(len(pelicula["reparto"])) + "actores" + ".") # normalmente no necesitaremos esto, pero aquí hay un ejemplo de cómo formatear # cadenas complejas con print print("La película se titula {} y tiene {} actores.".format(pelicula["titulo"], len(pelicula["reparto"]))) # creamos una ficha para una serie serie = {"titulo": "Narcos", "ano":2015, "reparto":["Uno", "Otro"]} print(serie) print(serie["titulo"]) # añadimos más datos serie["esBuena"] = True print(serie) """ Explanation: Ejercicio final en clase En clase se nos ocurre cómo podemos crearnos lista de películas y series de televisión que nos gustan. Definimos fichas de películas y series usando diccionarios. 
End of explanation """ coleccion = [] coleccion.append(serie) print(coleccion) coleccion.append(pelicula) print(coleccion) # añado una tercera serie, y la meto en la colección serie = {"titulo": "The Expanse", "ano":2015, "reparto":["Fulano", "Mengano"]} coleccion.append(serie) print(coleccion) """ Explanation: Para simular una colección de películas y series vistas en orden cronológico, creo un lista de Pyhton en la que voy incluyendo las fichas de películas a medida que las voy viendo. End of explanation """
hannorein/rebound
ipython_examples/IntegratingArbitraryODEs.ipynb
gpl-3.0
import rebound import numpy as np import matplotlib.pyplot as plt """ Explanation: Integrating arbitrary ODEs Although REBOUND is primarily an N-body integrator, it can also integrate arbitrary ordinary differential equations (ODEs). Even better: it can integrate arbitrary ODEs in parallel with an N-body simulation. This allows you to couple various physical effects such as spin and tides to orbital dynamics. In this example, we are integrating a two planet system and a decoupled harmonic oscillator which is governed by the following ODE: $$ y_0(t)'' = -\frac km y_0(t)$$ or equivalently as a set of 2 first order differential equations $$ \begin{pmatrix} y_0(t)\y_1(t)\end{pmatrix}' = \begin{pmatrix} y_1(t)\- \frac k m y_0(t)\end{pmatrix} $$ End of explanation """ sim = rebound.Simulation() sim.add(m=1) sim.add(a=1.2,m=1e-3,e=0.1) sim.add(a=2.3,m=1e-3,e=0.1) sim.integrator = "BS" """ Explanation: We first set up our N-body simulation. Note that we are using the Gragg-Bulirsch-Stoer integrator (BS). End of explanation """ ode_ho = sim.create_ode(length=2, needs_nbody=False) """ Explanation: We can create an ODE structure. Note that the ODE is linked to the simulation. If you run multiple simulations in parallel, you need to create an ode structure for each of them. End of explanation """ # Mass and spring constants m = 1. k = 10. # Initial conditions ode_ho.y[0] = 1. ode_ho.y[1] = 0. # zero velocity # RHS def derivatives_ho(ode, yDot, y, t): yDot[0] = y[1] yDot[1] = -k/m*y[0] ode_ho.derivatives = derivatives_ho """ Explanation: Next, we setup the ODE structure with the initial conditions and the right hand side (RHS) of the harmonic oscillator: End of explanation """ def energy_ho(ode): return 0.5*k*ode.y[0]**2 + 0.5*m*ode.y[1]**2 """ Explanation: To keep track of how accurate the integration of the harmonic oscillator is, we can calculate the energy which is conserved in the physical system. 
End of explanation """ times = np.linspace(0.,60.,1000) energies_nbody = np.zeros(len(times)) energies_ho = np.zeros(len(times)) r_nbody = np.zeros(len(times)) x_ho = np.zeros(len(times)) for i, t in enumerate(times): sim.integrate(t) r_nbody[i] = sim.particles[1].d x_ho[i] = ode_ho.y[0] energies_nbody[i] = sim.calculate_energy() energies_ho[i] = energy_ho(ode_ho) """ Explanation: Now we can run the simulation, keeping track of a few quantities along the way. End of explanation """ fig, ax = plt.subplots(1,1) ax.set_xlabel("time") ax.set_ylabel("relative energy error") ax.set_yscale("log") ax.plot(times,np.abs((energies_nbody-energies_nbody[0])/energies_nbody[0]), label="N-body") ax.plot(times,np.abs((energies_ho-energies_ho[0])/energies_ho[0]), label="harmonic oscillator") ax.legend() """ Explanation: Let's plot the relative energy error over time for both the N-body and the harmonic oscillator integration. End of explanation """ fig, ax = plt.subplots(1,1) ax.set_xlabel("time") ax.plot(times,r_nbody, label="planet") ax.plot(times,x_ho, label="harmonic oscillator") ax.legend() """ Explanation: Let us also plot the radius of the inner planet and the position coordinate of the harmonic oscillator. End of explanation """ sim.ri_bs.eps_rel = 1e-8 sim.ri_bs.eps_abs = 1e-8 """ Explanation: The above example is using the BS integrator for both the N-body and the harmonic oscillator integration. The BS integrator has default tolerance parameters set to $10^{-5}$. You can change the relative or absolute tolerance with to get more accurate results: End of explanation """ def derivatives_ho_forced(ode, yDot, y, t): # Now we can access particles and their orbital parameters during sub-steps forcing = np.sin(sim.particles[1].f) # Note that we are using the global sim variable. 
# Alternatively, one can also access the simulation via # sim = ode.contents.r.contents yDot[0] = y[1] yDot[1] = -k/m*y[0] + forcing ode_ho.derivatives = derivatives_ho_forced """ Explanation: Note that in this example, the harmonic oscillator has a period that is shorter than any orbital timescale. Therefore the timestep is limited by the harmonic oscillator, not the N-body integration. As a result, the N-body integration has an error much smaller than the tolerance parameters. Let us change the simple harmonic oscillator to a forced harmonic oscillator where the forcing depends on phase of a planet. End of explanation """ ode_ho.needs_nbody = True """ Explanation: We explicitly set needs_nbody = False during initialization. We therefore need to tell REBOUND that our ODE now needs access to the particle state during the integrations: End of explanation """ times = np.linspace(65.,120.,1000) for i, t in enumerate(times): sim.integrate(t) r_nbody[i] = sim.particles[1].d x_ho[i] = ode_ho.y[0] energies_nbody[i] = sim.calculate_energy() energies_ho[i] = energy_ho(ode_ho) """ Explanation: Running the integration a bit further, now with the forced harmonic oscillator: End of explanation """ fig, ax = plt.subplots(1,1) ax.set_xlabel("time") ax.plot(times,r_nbody, label="planet") ax.plot(times,x_ho, label="harmonic oscillator") ax.legend() """ Explanation: The harmonic oscillator is now getting forced by the planet. End of explanation """
kmorel/kmorel.github.io
images/vaccine-correlations/vaccinevislie.ipynb
mit
vaccine_data = pandas.read_csv( 'covid19_vaccinations_in_the_united_states.csv', header=2, index_col='State/Territory/Federal Entity', ) print(vaccine_data.columns) vaccine_data.head() """ Explanation: Data read from https://covid.cdc.gov/covid-data-tracker/#vaccinations_vacc-total-admin-rate-total on October 24, 2021. End of explanation """ cases_data = pandas.read_csv( 'united_states_covid19_cases_deaths_and_testing_by_state.csv', header=2, index_col='State/Territory', ) print(cases_data.columns) cases_data.head() summary_table = pandas.DataFrame( { 'Total Deaths': cases_data['Total Deaths'], 'Death Rate': cases_data['7-Day Death Rate per 100000'], 'Total Vaccines': vaccine_data['People Fully Vaccinated by State of Residence'], 'Vaccine Percent': vaccine_data['Percent of Total Pop Fully Vaccinated by State of Residence'] }, index=cases_data.index, ) summary_table = summary_table.drop(index=['United States of America']).dropna() summary_table canvas = toyplot.Canvas(width='4in', height='3in') axes = canvas.cartesian( label='VACCINES KILL!', xlabel='People Vaccinated (millions)', ylabel='COVID-19 Deaths (thousands)', ) x = summary_table['Total Vaccines']/1000000 y = summary_table['Total Deaths']/1000 fit_coef = numpy.polyfit(x, y, 1) fit_x = numpy.array([numpy.min(x), numpy.max(x)]) fit_y = fit_x*fit_coef[0] + fit_coef[1] axes.plot(fit_x, fit_y, color='#BBBBBB') axes.scatterplot(x, y) toyplot.svg.render(canvas, 'vaccines_bad.svg') canvas = toyplot.Canvas(width='4in', height='3in') axes = canvas.cartesian( label='VACCINES SAVE!', xlabel='People Vaccinated (percent)', ylabel='Weekly Deaths (per 100000)', ) x = summary_table['Vaccine Percent'] y = summary_table['Death Rate'] fit_coef = numpy.polyfit(x, y, 1) fit_x = numpy.array([numpy.min(x), numpy.max(x)]) fit_y = fit_x*fit_coef[0] + fit_coef[1] axes.plot(fit_x, fit_y, color='#BBBBBB') axes.scatterplot(x, y) toyplot.svg.render(canvas, 'vaccines_good.svg') """ Explanation: Data read from 
https://covid.cdc.gov/covid-data-tracker/#cases_deathsinlast7days on October 24, 2021. End of explanation """
Esri/gis-stat-analysis-py-tutor
notebooks/ExtendingArcGISDirectly.ipynb
apache-2.0
import arcpy as ARCPY import numpy as NUM import SSDataObject as SSDO import scipy as SCIPY import pandas as PANDA import pysal as PYSAL """ Explanation: Leveraging Open-Source Python Packages for Data Analysis within the ArcGIS Environment (Direct Integration Strategy) Using NumPy as the common denominator Could use the ArcPy Data Access Module directly, but there are host of issues/information one must take into account: How to deal with projections and other environment settings? How Cursors affect the accounting of features? How to deal with bad records/bad data and error handling? How to honor/account for full field object control? How do I create output features that correspond to my inputs? Points are easy, what about Polygons and Polylines? Spatial Statistics Data Object (SSDataObject) Almost 30 Spatial Statistics Tools written in Python that ${\bf{must}}$ behave like traditional GP Tools Use SSDataObject and your code should adhere The Data Analysis Python Modules PANDAS (Python Data Analysis Library) SciPy (Scientific Python) PySAL (Python Spatial Analysis Library) Basic Imports End of explanation """ inputFC = r'../data/CA_Polygons.shp' ssdo = SSDO.SSDataObject(inputFC) for fieldName, fieldObject in ssdo.allFields.iteritems(): print fieldName, fieldObject.type """ Explanation: Initialize Data Object and Query Attribute Fields End of explanation """ ssdo.obtainData(ssdo.oidName, ['GROWTH', 'PCR1970', 'POPDEN70', 'PERCNOHS']) popInfo = ssdo.fields['POPDEN70'] popData = popInfo.data print popData[0:5] """ Explanation: Select Fields to Read Into NumPy Arrays The Unique ID Field (Object ID in this example) will keep track of the order of your features You have no control over Object ID Fields. It is quick, assures "uniqueness", but can't assume they will not get "scrambled" during copies. To assure full control I advocate the "Add Field (LONG)" --> "Calculate Field (From Object ID)" workflow. 
End of explanation """ import numpy.random as RAND ARCPY.env.overwriteOutput = True outArray = RAND.normal(0,1, (ssdo.numObs,)) outDict = {} outField = SSDO.CandidateField('STDNORM', 'DOUBLE', outArray, alias = 'My Standard Normal Result') outDict[outField.name] = outField """ Explanation: Adding Results to Input/Output Example: Adding a field of random standard normal values to your input/output Create a Dictionary of Candidate Fields End of explanation """ ssdo.addFields2FC(outDict) """ Explanation: Add New Field to Input Be Carefull! End of explanation """ import os as OS outputFC = OS.path.abspath(r'../data/testMyOutput.shp') ssdo.output2NewFC(outputFC, outDict, appendFields = ['GROWTH', 'PERCNOHS']) del ssdo """ Explanation: Copy Features, Selected Attribute Field(s), New Result Field(s) to Output End of explanation """ ssdo = SSDO.SSDataObject(inputFC) years = NUM.arange(1975, 2015, 5) fieldNames = ['PCR' + str(i) for i in years] fieldNamesAll = fieldNames + ['NEW_NAME', 'SOCAL'] ssdo.obtainData("MYID", fieldNamesAll) ids = [ssdo.order2Master[i] for i in xrange(ssdo.numObs)] convertDictDF = {} for fieldName, fieldObject in ssdo.fields.iteritems(): convertDictDF[fieldName] = fieldObject.data df = PANDA.DataFrame(convertDictDF, index = ids) print df[0:5] """ Explanation: Getting More Advanced - SciPy and PANDAS End of explanation """ groups = df.groupby('SOCAL') print groups.mean() """ Explanation: Using GroupBy for Conditional Statistics Example: One Liner for Average Incomes Based on Southern/Non-Southern California End of explanation """ print groups.median() """ Explanation: Now the Median... 
End of explanation """ pcr = df.ix[:,1:9] rollMeans = NUM.apply_along_axis(PANDA.rolling_mean, 1, pcr, 4) timeInts = NUM.arange(0, 5) outArray = NUM.empty((ssdo.numObs, 5), float) for i in xrange(ssdo.numObs): outArray[i] = SCIPY.stats.linregress(timeInts, rollMeans[i,3:]) """ Explanation: Example: Calculating the Trend of Rolling Means End of explanation """ outputFC = OS.path.abspath(r'../data/testMyRollingMeanInfo.shp') outFields = [ "SLOPE", "INTERCEPT", "R_SQRAURED", "P_VALUE", "STD_ERR" ] outDict = {} for fieldInd, fieldName in enumerate(outFields): outDict[fieldName] = SSDO.CandidateField(fieldName, "DOUBLE", outArray[:,fieldInd]) ssdo.output2NewFC(outputFC, outDict, fieldOrder = outFields) del ssdo """ Explanation: Write to Output (Same as Always...) End of explanation """ ssdo = SSDO.SSDataObject(inputFC) ssdo.obtainData(ssdo.oidName, ['GROWTH', 'POP1970', 'PERCNOHS']) w = PYSAL.weights.knnW(ssdo.xyCoords, k=5) X = NUM.empty((ssdo.numObs,2), float) X[:,0] = ssdo.fields['GROWTH'].data X[:,1] = ssdo.fields['PERCNOHS'].data floorVal = 1000000.0 floorVar = ssdo.fields['POP1970'].returnDouble() maxp = PYSAL.region.Maxp(w, X, floorVal, floor_variable = floorVar) outArray = NUM.empty((ssdo.numObs,), int) for regionID, orderIDs in enumerate(maxp.regions): outArray[orderIDs] = regionID print regionID, orderIDs outputFC = OS.path.abspath(r'../data/testMaxPInfo.shp') outDict = {} outDict["REGIONID"] = SSDO.CandidateField("REGIONID", "DOUBLE", outArray) ssdo.output2NewFC(outputFC, outDict, appendFields = ['GROWTH', 'POP1970', 'PERCNOHS']) del ssdo """ Explanation: Even More Advanced: PySAL Example: Max(p) Regional Clustering End of explanation """
kdestasio/online_brain_intensive
nipype_tutorial/notebooks/basic_joinnodes.ipynb
gpl-2.0
from nipype import JoinNode, Node, Workflow from nipype.interfaces.utility import Function, IdentityInterface def get_data_from_id(id): """Generate a random number based on id""" import numpy as np return id + np.random.rand() def merge_and_scale_data(data2): """Scale the input list by 1000""" import numpy as np return (np.array(data2) * 1000).tolist() node1 = Node(Function(input_names=['id'], output_names=['data1'], function=get_data_from_id), name='get_data') node1.iterables = ('id', [1, 2, 3]) node2 = JoinNode(Function(input_names=['data2'], output_names=['data_scaled'], function=merge_and_scale_data), name='scale_data', joinsource=node1, joinfield=['data2']) wf = Workflow(name='testjoin') wf.connect(node1, 'data1', node2, 'data2') eg = wf.run() wf.write_graph(graph2use='exec') from IPython.display import Image Image(filename='graph_detailed.dot.png') """ Explanation: <img src="../static/images/joinnode.png" width="240"> JoinNode JoinNode have the opposite effect of iterables. Where iterables split up the execution workflow into many different branches, a JoinNode merges them back into on node. For a more detailed explanation, check out JoinNode, synchronize and itersource from the main homepage. Simple example Let's consider the very simple example depicted at the top of this page: ```python from nipype import Node, JoinNode, Workflow Specify fake input node A a = Node(interface=A(), name="a") Iterate over fake node B's input 'in_file? b = Node(interface=B(), name="b") b.iterables = ('in_file', [file1, file2]) Pass results on to fake node C c = Node(interface=C(), name="c") Join forked execution workflow in fake node D d = JoinNode(interface=D(), joinsource="b", joinfield="in_files", name="d") Put everything into a workflow as usual workflow = Workflow(name="workflow") workflow.connect([(a, b, [('subject', 'subject')]), (b, c, [('out_file', 'in_file')]) (c, d, [('out_file', 'in_files')]) ]) ``` As you can see, setting up a JoinNode is rather simple. 
The only difference to a normal Node are the joinsource and the joinfield. joinsource specifies from which node the information to join is coming and the joinfield specifies the input field of the JoinNode where the information to join will be entering the node. More realistic example Let's consider another example where we have one node that iterates over 3 different numbers and generates randome numbers. Another node joins those three different numbers (each coming from a separate branch of the workflow) into one list. To make the whole thing a bit more realistic, the second node will use the Function interface to do something with those numbers, before we spit them out again. End of explanation """ res = [node for node in eg.nodes() if 'scale_data' in node.name][0].result res.outputs res.inputs """ Explanation: Now, let's look at the input and output of the joinnode: End of explanation """ def get_data_from_id(id): import numpy as np return id + np.random.rand() def scale_data(data2): import numpy as np return data2 def replicate(data3, nreps=2): return data3 * nreps node1 = Node(Function(input_names=['id'], output_names=['data1'], function=get_data_from_id), name='get_data') node1.iterables = ('id', [1, 2, 3]) node2 = Node(Function(input_names=['data2'], output_names=['data_scaled'], function=scale_data), name='scale_data') node3 = JoinNode(Function(input_names=['data3'], output_names=['data_repeated'], function=replicate), name='replicate_data', joinsource=node1, joinfield=['data3']) wf = Workflow(name='testjoin') wf.connect(node1, 'data1', node2, 'data2') wf.connect(node2, 'data_scaled', node3, 'data3') eg = wf.run() wf.write_graph(graph2use='exec') Image(filename='graph_detailed.dot.png') """ Explanation: Extending to multiple nodes We extend the workflow by using three nodes. 
Note that even this workflow, the joinsource corresponds to the node containing iterables and the joinfield corresponds to the input port of the JoinNode that aggregates the iterable branches. As before the graph below shows how the execution process is setup. End of explanation """
fastai/course-v3
nbs/dl2/cyclegan_ws.ipynb
apache-2.0
#path = Config().data_path() #! wget https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/summer2winter_yosemite.zip -P {path} #! unzip -q -n {path}/summer2winter_yosemite.zip -d {path} #! rm {path}/summer2winter_yosemite.zip path = Config().data_path()/'summer2winter_yosemite' path.ls() """ Explanation: Data One-time download, uncomment the next cells to get the data. End of explanation """ class ImageTuple(ItemBase): def __init__(self, img1, img2): self.img1,self.img2 = img1,img2 self.obj,self.data = (img1,img2),[-1+2*img1.data,-1+2*img2.data] def apply_tfms(self, tfms, **kwargs): self.img1 = self.img1.apply_tfms(tfms, **kwargs) self.img2 = self.img2.apply_tfms(tfms, **kwargs) return self def to_one(self): return Image(0.5+torch.cat(self.data,2)/2) class TargetTupleList(ItemList): def reconstruct(self, t:Tensor): if len(t.size()) == 0: return t return ImageTuple(Image(t[0]/2+0.5),Image(t[1]/2+0.5)) class ImageTupleList(ImageList): _label_cls=TargetTupleList def __init__(self, items, itemsB=None, **kwargs): self.itemsB = itemsB super().__init__(items, **kwargs) def new(self, items, **kwargs): return super().new(items, itemsB=self.itemsB, **kwargs) def get(self, i): img1 = super().get(i) fn = self.itemsB[random.randint(0, len(self.itemsB)-1)] return ImageTuple(img1, open_image(fn)) def reconstruct(self, t:Tensor): return ImageTuple(Image(t[0]/2+0.5),Image(t[1]/2+0.5)) @classmethod def from_folders(cls, path, folderA, folderB, **kwargs): itemsB = ImageList.from_folder(path/folderB).items res = super().from_folder(path/folderA, itemsB=itemsB, **kwargs) res.path = path return res def show_xys(self, xs, ys, figsize:Tuple[int,int]=(12,6), **kwargs): "Show the `xs` and `ys` on a figure of `figsize`. `kwargs` are passed to the show method." 
rows = int(math.sqrt(len(xs))) fig, axs = plt.subplots(rows,rows,figsize=figsize) for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]): xs[i].to_one().show(ax=ax, **kwargs) plt.tight_layout() def show_xyzs(self, xs, ys, zs, figsize:Tuple[int,int]=None, **kwargs): """Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`. `kwargs` are passed to the show method.""" figsize = ifnone(figsize, (12,3*len(xs))) fig,axs = plt.subplots(len(xs), 2, figsize=figsize) fig.suptitle('Ground truth / Predictions', weight='bold', size=14) for i,(x,z) in enumerate(zip(xs,zs)): x.to_one().show(ax=axs[i,0], **kwargs) z.to_one().show(ax=axs[i,1], **kwargs) data = (ImageTupleList.from_folders(path, 'trainA', 'trainB') .split_none() .label_empty() .transform(get_transforms(), size=128) .databunch(bs=4)) data.show_batch(rows=2) """ Explanation: See this tutorial for a detailed walkthrough of how/why this custom ItemList was created. End of explanation """ def convT_norm_relu(ch_in:int, ch_out:int, norm_layer:nn.Module, ks:int=3, stride:int=2, bias:bool=True): return [nn.ConvTranspose2d(ch_in, ch_out, kernel_size=ks, stride=stride, padding=1, output_padding=1, bias=bias), norm_layer(ch_out), nn.ReLU(True)] def pad_conv_norm_relu(ch_in:int, ch_out:int, pad_mode:str, norm_layer:nn.Module, ks:int=3, bias:bool=True, pad=1, stride:int=1, activ:bool=True, init:Callable=nn.init.kaiming_normal_)->List[nn.Module]: layers = [] if pad_mode == 'reflection': layers.append(nn.ReflectionPad2d(pad)) elif pad_mode == 'border': layers.append(nn.ReplicationPad2d(pad)) p = pad if pad_mode == 'zeros' else 0 conv = nn.Conv2d(ch_in, ch_out, kernel_size=ks, padding=p, stride=stride, bias=bias) if init: init(conv.weight) if hasattr(conv, 'bias') and hasattr(conv.bias, 'data'): conv.bias.data.fill_(0.) 
layers += [conv, norm_layer(ch_out)] if activ: layers.append(nn.ReLU(inplace=True)) return layers class ResnetBlock(nn.Module): def __init__(self, dim:int, pad_mode:str='reflection', norm_layer:nn.Module=None, dropout:float=0., bias:bool=True): super().__init__() assert pad_mode in ['zeros', 'reflection', 'border'], f'padding {pad_mode} not implemented.' norm_layer = ifnone(norm_layer, nn.InstanceNorm2d) layers = pad_conv_norm_relu(dim, dim, pad_mode, norm_layer, bias=bias) if dropout != 0: layers.append(nn.Dropout(dropout)) layers += pad_conv_norm_relu(dim, dim, pad_mode, norm_layer, bias=bias, activ=False) self.conv_block = nn.Sequential(*layers) def forward(self, x): return x + self.conv_block(x) def resnet_generator(ch_in:int, ch_out:int, n_ftrs:int=64, norm_layer:nn.Module=None, dropout:float=0., n_blocks:int=6, pad_mode:str='reflection')->nn.Module: norm_layer = ifnone(norm_layer, nn.InstanceNorm2d) bias = (norm_layer == nn.InstanceNorm2d) layers = pad_conv_norm_relu(ch_in, n_ftrs, 'reflection', norm_layer, pad=3, ks=7, bias=bias) for i in range(2): layers += pad_conv_norm_relu(n_ftrs, n_ftrs *2, 'zeros', norm_layer, stride=2, bias=bias) n_ftrs *= 2 layers += [ResnetBlock(n_ftrs, pad_mode, norm_layer, dropout, bias) for _ in range(n_blocks)] for i in range(2): layers += convT_norm_relu(n_ftrs, n_ftrs//2, norm_layer, bias=bias) n_ftrs //= 2 layers += [nn.ReflectionPad2d(3), nn.Conv2d(n_ftrs, ch_out, kernel_size=7, padding=0), nn.Tanh()] return nn.Sequential(*layers) resnet_generator(3, 3) def conv_norm_lr(ch_in:int, ch_out:int, norm_layer:nn.Module=None, ks:int=3, bias:bool=True, pad:int=1, stride:int=1, activ:bool=True, slope:float=0.2, init:Callable=nn.init.kaiming_normal_)->List[nn.Module]: conv = nn.Conv2d(ch_in, ch_out, kernel_size=ks, padding=pad, stride=stride, bias=bias) if init: init(conv.weight) if hasattr(conv, 'bias') and hasattr(conv.bias, 'data'): conv.bias.data.fill_(0.) 
layers = [conv] if norm_layer is not None: layers.append(norm_layer(ch_out)) if activ: layers.append(nn.LeakyReLU(slope, inplace=True)) return layers def discriminator(ch_in:int, n_ftrs:int=64, n_layers:int=3, norm_layer:nn.Module=None, sigmoid:bool=False)->nn.Module: norm_layer = ifnone(norm_layer, nn.InstanceNorm2d) bias = (norm_layer == nn.InstanceNorm2d) layers = conv_norm_lr(ch_in, n_ftrs, ks=4, stride=2, pad=1) for i in range(n_layers-1): new_ftrs = 2*n_ftrs if i <= 3 else n_ftrs layers += conv_norm_lr(n_ftrs, new_ftrs, norm_layer, ks=4, stride=2, pad=1, bias=bias) n_ftrs = new_ftrs new_ftrs = 2*n_ftrs if n_layers <=3 else n_ftrs layers += conv_norm_lr(n_ftrs, new_ftrs, norm_layer, ks=4, stride=1, pad=1, bias=bias) layers.append(nn.Conv2d(new_ftrs, 1, kernel_size=4, stride=1, padding=1)) if sigmoid: layers.append(nn.Sigmoid()) return nn.Sequential(*layers) discriminator(3) """ Explanation: Models We use the models that were introduced in the cycleGAN paper. End of explanation """ class CycleGAN(nn.Module): def __init__(self, ch_in:int, ch_out:int, n_features:int=64, disc_layers:int=3, gen_blocks:int=6, lsgan:bool=True, drop:float=0., norm_layer:nn.Module=None): super().__init__() self.D_A = discriminator(ch_in, n_features, disc_layers, norm_layer, sigmoid=not lsgan) self.D_B = discriminator(ch_in, n_features, disc_layers, norm_layer, sigmoid=not lsgan) self.G_A = resnet_generator(ch_in, ch_out, n_features, norm_layer, drop, gen_blocks) self.G_B = resnet_generator(ch_in, ch_out, n_features, norm_layer, drop, gen_blocks) #G_A: takes real input B and generates fake input A #G_B: takes real input A and generates fake input B #D_A: trained to make the difference between real input A and fake input A #D_B: trained to make the difference between real input B and fake input B def forward(self, real_A, real_B): fake_A, fake_B = self.G_A(real_B), self.G_B(real_A) if not self.training: return torch.cat([fake_A[:,None],fake_B[:,None]], 1) idt_A, idt_B = self.G_A(real_A), 
self.G_B(real_B) #Needed for the identity loss during training. return [fake_A, fake_B, idt_A, idt_B] """ Explanation: We group two discriminators and two generators in a single model, then a Callback will take care of training them properly. End of explanation """ class AdaptiveLoss(nn.Module): def __init__(self, crit): super().__init__() self.crit = crit def forward(self, output, target:bool, **kwargs): targ = output.new_ones(*output.size()) if target else output.new_zeros(*output.size()) return self.crit(output, targ, **kwargs) """ Explanation: AdaptiveLoss is a wrapper around a PyTorch loss function to compare an output of any size with a single number (0. or 1.). It will generate a target with the same shape as the output. A discriminator returns a feature map, and we want it to predict zeros (or ones) for each feature. End of explanation """ class CycleGanLoss(nn.Module): def __init__(self, cgan:nn.Module, lambda_A:float=10., lambda_B:float=10, lambda_idt:float=0.5, lsgan:bool=True): super().__init__() self.cgan,self.l_A,self.l_B,self.l_idt = cgan,lambda_A,lambda_B,lambda_idt self.crit = AdaptiveLoss(F.mse_loss if lsgan else F.binary_cross_entropy) def set_input(self, input): self.real_A,self.real_B = input def forward(self, output, target): fake_A, fake_B, idt_A, idt_B = output #Generators should return identity on the datasets they try to convert to self.id_loss = self.l_idt * (self.l_B * F.l1_loss(idt_A, self.real_B) + self.l_A * F.l1_loss(idt_B, self.real_A)) #Generators are trained to trick the discriminators so the following should be ones self.gen_loss = self.crit(self.cgan.D_A(fake_A), True) + self.crit(self.cgan.D_B(fake_B), True) #Cycle loss self.cyc_loss = self.l_A * F.l1_loss(self.cgan.G_A(fake_B), self.real_A) self.cyc_loss += self.l_B * F.l1_loss(self.cgan.G_B(fake_A), self.real_B) return self.id_loss+self.gen_loss+self.cyc_loss """ Explanation: The main loss used to train the generators. 
It has three parts: - the classic GAN loss: they must make the critics believe their images are real - identity loss: if they are given an image from the set they are trying to imitate, they should return the same thing - cycle loss: if an image from A goes through the generator that imitates B then through the generator that imitates A, it should be the same as the initial image. Same for B and switching the generators End of explanation """ class CycleGANTrainer(LearnerCallback): _order = -20 #Need to run before the Recorder def _set_trainable(self, D_A=False, D_B=False): gen = (not D_A) and (not D_B) requires_grad(self.learn.model.G_A, gen) requires_grad(self.learn.model.G_B, gen) requires_grad(self.learn.model.D_A, D_A) requires_grad(self.learn.model.D_B, D_B) if not gen: self.opt_D_A.lr, self.opt_D_A.mom = self.learn.opt.lr, self.learn.opt.mom self.opt_D_A.wd, self.opt_D_A.beta = self.learn.opt.wd, self.learn.opt.beta self.opt_D_B.lr, self.opt_D_B.mom = self.learn.opt.lr, self.learn.opt.mom self.opt_D_B.wd, self.opt_D_B.beta = self.learn.opt.wd, self.learn.opt.beta def on_train_begin(self, **kwargs): self.G_A,self.G_B = self.learn.model.G_A,self.learn.model.G_B self.D_A,self.D_B = self.learn.model.D_A,self.learn.model.D_B self.crit = self.learn.loss_func.crit if not getattr(self,'opt_G',None): self.opt_G = self.learn.opt.new([nn.Sequential(*flatten_model(self.G_A), *flatten_model(self.G_B))]) else: self.opt_G.lr,self.opt_G.wd = self.opt.lr,self.opt.wd self.opt_G.mom,self.opt_G.beta = self.opt.mom,self.opt.beta if not getattr(self,'opt_D_A',None): self.opt_D_A = self.learn.opt.new([nn.Sequential(*flatten_model(self.D_A))]) if not getattr(self,'opt_D_B',None): self.opt_D_B = self.learn.opt.new([nn.Sequential(*flatten_model(self.D_B))]) self.learn.opt.opt = self.opt_G.opt self._set_trainable() self.id_smter,self.gen_smter,self.cyc_smter = SmoothenValue(0.98),SmoothenValue(0.98),SmoothenValue(0.98) self.da_smter,self.db_smter = 
SmoothenValue(0.98),SmoothenValue(0.98) self.recorder.add_metric_names(['id_loss', 'gen_loss', 'cyc_loss', 'D_A_loss', 'D_B_loss']) def on_batch_begin(self, last_input, **kwargs): self.learn.loss_func.set_input(last_input) def on_backward_begin(self, **kwargs): self.id_smter.add_value(self.loss_func.id_loss.detach().cpu()) self.gen_smter.add_value(self.loss_func.gen_loss.detach().cpu()) self.cyc_smter.add_value(self.loss_func.cyc_loss.detach().cpu()) def on_batch_end(self, last_input, last_output, **kwargs): self.G_A.zero_grad(); self.G_B.zero_grad() fake_A, fake_B = last_output[0].detach(), last_output[1].detach() real_A, real_B = last_input self._set_trainable(D_A=True) self.D_A.zero_grad() loss_D_A = 0.5 * (self.crit(self.D_A(real_A), True) + self.crit(self.D_A(fake_A), False)) self.da_smter.add_value(loss_D_A.detach().cpu()) loss_D_A.backward() self.opt_D_A.step() self._set_trainable(D_B=True) self.D_B.zero_grad() loss_D_B = 0.5 * (self.crit(self.D_B(real_B), True) + self.crit(self.D_B(fake_B), False)) self.db_smter.add_value(loss_D_B.detach().cpu()) loss_D_B.backward() self.opt_D_B.step() self._set_trainable() def on_epoch_end(self, last_metrics, **kwargs): return add_metrics(last_metrics, [s.smooth for s in [self.id_smter,self.gen_smter,self.cyc_smter, self.da_smter,self.db_smter]]) """ Explanation: The main callback to train a cycle GAN. The training loop will train the generators (so learn.opt is given those parameters) while the critics are trained by the callback during on_batch_end. 
End of explanation """ cycle_gan = CycleGAN(3,3, gen_blocks=9) learn = Learner(data, cycle_gan, loss_func=CycleGanLoss(cycle_gan), opt_func=partial(optim.Adam, betas=(0.5,0.99)), callback_fns=[CycleGANTrainer]) learn.lr_find() learn.recorder.plot() learn.fit(100, 1e-4) learn.save('100fit') learn = learn.load('100fit') """ Explanation: Training End of explanation """ learn.show_results(ds_type=DatasetType.Train, rows=10) learn.show_results(ds_type=DatasetType.Train, rows=10) """ Explanation: Let's look at some results using Learner.show_results. End of explanation """ len(learn.data.train_ds.items),len(learn.data.train_ds.itemsB) def get_batch(filenames, tfms, **kwargs): samples = [open_image(fn) for fn in filenames] for s in samples: s = s.apply_tfms(tfms, **kwargs) batch = torch.stack([s.data for s in samples], 0).cuda() return 2. * (batch - 0.5) fnames = learn.data.train_ds.items[:8] x = get_batch(fnames, get_transforms()[1], size=128) learn.model.eval() tfms = get_transforms()[1] bs = 16 def get_losses(fnames, gen, crit, bs=16): losses_in,losses_out = [],[] with torch.no_grad(): for i in progress_bar(range(0, len(fnames), bs)): xb = get_batch(fnames[i:i+bs], tfms, size=128) fakes = gen(xb) preds_in,preds_out = crit(xb),crit(fakes) loss_in = learn.loss_func.crit(preds_in, True,reduction='none') loss_out = learn.loss_func.crit(preds_out,True,reduction='none') losses_in.append(loss_in.view(loss_in.size(0),-1).mean(1)) losses_out.append(loss_out.view(loss_out.size(0),-1).mean(1)) return torch.cat(losses_in),torch.cat(losses_out) losses_A = get_losses(data.train_ds.x.items, learn.model.G_B, learn.model.D_B) losses_B = get_losses(data.train_ds.x.itemsB, learn.model.G_A, learn.model.D_A) def show_best(fnames, losses, gen, n=8): sort_idx = losses.argsort().cpu() _,axs = plt.subplots(n//2, 4, figsize=(12,2*n)) xb = get_batch(fnames[sort_idx][:n], tfms, size=128) with torch.no_grad(): fakes = gen(xb) xb,fakes = (1+xb.cpu())/2,(1+fakes.cpu())/2 for i in range(n): 
axs.flatten()[2*i].imshow(xb[i].permute(1,2,0)) axs.flatten()[2*i].axis('off') axs.flatten()[2*i+1].imshow(fakes[i].permute(1,2,0)) axs.flatten()[2*i+1].set_title(losses[sort_idx][i].item()) axs.flatten()[2*i+1].axis('off') show_best(data.train_ds.x.items, losses_A[1], learn.model.G_B) show_best(data.train_ds.x.itemsB, losses_B[1], learn.model.G_A) show_best(data.train_ds.x.items, losses_A[1]-losses_A[0], learn.model.G_B) """ Explanation: Now let's go through all the images of the training set and find the ones that are the best converted (according to our critics) or the worst converted. End of explanation """
vberthiaume/vblandr
udacity/udacity/3_regularization.ipynb
apache-2.0
# These are all the modules we'll be using later. Make sure you can import them before proceeding further. from __future__ import print_function import numpy as np import tensorflow as tf from six.moves import cPickle as pickle """ Explanation: Deep Learning Assignment 3 Previously in 2_fullyconnected.ipynb, you trained a logistic regression and a neural network model. The goal of this assignment is to explore regularization techniques. End of explanation """ pickle_file = 'notMNIST.pickle' with open(pickle_file, 'rb') as f: save = pickle.load(f) train_dataset = save['train_dataset'] train_labels = save['train_labels'] valid_dataset = save['valid_dataset'] valid_labels = save['valid_labels'] test_dataset = save['test_dataset'] test_labels = save['test_labels'] del save # hint to help gc free up memory print('Training set', train_dataset.shape, train_labels.shape) print('Validation set', valid_dataset.shape, valid_labels.shape) print('Test set', test_dataset.shape, test_labels.shape) """ Explanation: First reload the data we generated in notmist.ipynb. End of explanation """ image_size = 28 num_labels = 10 #this is just like in previous ass def reformat(dataset, labels): dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32) # Map 2 to [0.0, 1.0, 0.0 ...], 3 to [0.0, 0.0, 1.0 ...] 
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32) return dataset, labels train_dataset, train_labels = reformat(train_dataset, train_labels) valid_dataset, valid_labels = reformat(valid_dataset, valid_labels) test_dataset, test_labels = reformat(test_dataset, test_labels) print('Training set', train_dataset.shape, train_labels.shape) print('Validation set', valid_dataset.shape, valid_labels.shape) print('Test set', test_dataset.shape, test_labels.shape) def accuracy(predictions, labels): return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]) """ Explanation: Reformat into a shape that's more adapted to the models we're going to train: - data as a flat matrix, - labels as float 1-hot encodings. End of explanation """
adrn/GaiaPairsFollowup
paper/figures/Create-tgas-fits.ipynb
mit
from os import path # Third-party from astropy.io import ascii from astropy.table import Table import astropy.coordinates as coord import astropy.units as u from astropy.constants import G, c import matplotlib.pyplot as plt from matplotlib.colors import Normalize import numpy as np plt.style.use('apw-notebook') %matplotlib inline import sqlalchemy from gwb.data import TGASData from comoving_rv.log import logger from comoving_rv.db import Session, Base, db_connect from comoving_rv.db.model import (Run, Observation, TGASSource, SimbadInfo, PriorRV, SpectralLineInfo, SpectralLineMeasurement, RVMeasurement, GroupToObservations) """ Explanation: Creates row-matched FITS files for input to the mixture-model script. data/tgas_apw1.fits and data/tgas_apw2.fits for our RV measurements data/tgas_rave1.fits and data/tgas_rave2.fits for RAVE RV measurements End of explanation """ # base_path = '/Volumes/ProjectData/gaia-comoving-followup/' base_path = '../../data/' db_path = path.join(base_path, 'db.sqlite') engine = db_connect(db_path) session = Session() base_q = session.query(Observation).join(RVMeasurement).filter(RVMeasurement.rv != None) group_ids = np.array([x[0] for x in session.query(Observation.group_id).distinct().all() if x[0] is not None and x[0] > 0 and x[0] != 10]) len(group_ids) star1_dicts = [] star2_dicts = [] for gid in np.unique(group_ids): try: gto = session.query(GroupToObservations).filter(GroupToObservations.group_id == gid).one() obs1 = base_q.filter(Observation.id == gto.observation1_id).one() obs2 = base_q.filter(Observation.id == gto.observation2_id).one() except sqlalchemy.orm.exc.NoResultFound: print('Skipping group {0}'.format(gid)) continue raw_rv_diff = (obs1.measurements[0].x0 - obs2.measurements[0].x0) / 6563. * c.to(u.km/u.s) mean_rv = np.mean([obs1.rv_measurement.rv.value, obs2.rv_measurement.rv.value]) * obs2.rv_measurement.rv.unit rv1 = mean_rv + raw_rv_diff/2. rv_err1 = obs1.measurements[0].x0_error / 6563. 
* c.to(u.km/u.s) rv2 = mean_rv - raw_rv_diff/2. rv_err2 = obs2.measurements[0].x0_error / 6563. * c.to(u.km/u.s) # ------- # Star 1: row_dict = dict() star1 = obs1.tgas_star() for k in star1._data.dtype.names: if k in ['J', 'J_err', 'H', 'H_err', 'Ks', 'Ks_err']: continue row_dict[k] = star1._data[k] row_dict['RV'] = rv1.to(u.km/u.s).value row_dict['RV_err'] = rv_err1.to(u.km/u.s).value row_dict['group_id'] = gid star1_dicts.append(row_dict) # ------- # Star 2: row_dict = dict() star2 = obs2.tgas_star() for k in star2._data.dtype.names: if k in ['J', 'J_err', 'H', 'H_err', 'Ks', 'Ks_err']: continue row_dict[k] = star2._data[k] row_dict['RV'] = rv2.to(u.km/u.s).value row_dict['RV_err'] = rv_err2.to(u.km/u.s).value row_dict['group_id'] = gid star2_dicts.append(row_dict) tbl1 = Table(star1_dicts) tbl2 = Table(star2_dicts) tbl1.write('../../data/tgas_apw1.fits', overwrite=True) tbl2.write('../../data/tgas_apw2.fits', overwrite=True) """ Explanation: APW RV's End of explanation """ tgas = TGASData('../../../gaia-comoving-stars/data/stacked_tgas.fits') star = ascii.read('../../../gaia-comoving-stars/paper/t1-1-star.txt') rave_stars = star[(star['group_size'] == 2) & (~star['rv'].mask)] rave_stars = rave_stars.group_by('group_id') """ Explanation: RAVE End of explanation """ group_idx = np.array([i for i,g in enumerate(rave_stars.groups) if len(g) > 1]) rave_stars = rave_stars.groups[group_idx] star1_dicts = [] star2_dicts = [] for gid in np.unique(rave_stars['group_id']): rows = rave_stars[rave_stars['group_id'] == gid] if len(rows) != 2: print("skipping group {0} ({1})".format(gid, len(rows))) continue i1 = np.where(tgas._data['source_id'] == rows[0]['tgas_source_id'])[0][0] i2 = np.where(tgas._data['source_id'] == rows[1]['tgas_source_id'])[0][0] star1 = tgas[i1] star2 = tgas[i2] # ------- # Star 1: row_dict = dict() for k in star1._data.dtype.names: if k in ['J', 'J_err', 'H', 'H_err', 'Ks', 'Ks_err']: continue row_dict[k] = star1._data[k] row_dict['RV'] = 
rows[0]['rv'] row_dict['RV_err'] = rows[0]['erv'] row_dict['group_id'] = gid star1_dicts.append(row_dict) # ------- # Star 2: row_dict = dict() for k in star2._data.dtype.names: if k in ['J', 'J_err', 'H', 'H_err', 'Ks', 'Ks_err']: continue row_dict[k] = star2._data[k] row_dict['RV'] = rows[1]['rv'] row_dict['RV_err'] = rows[1]['erv'] row_dict['group_id'] = gid star2_dicts.append(row_dict) tbl1 = Table(star1_dicts) tbl2 = Table(star2_dicts) print(len(tbl1)) tbl1.write('../../data/tgas_rave1.fits', overwrite=True) tbl2.write('../../data/tgas_rave2.fits', overwrite=True) """ Explanation: Get only ones where both stars have RV measurements End of explanation """
liumengjun/cn-deep-learning
ipnd-neural-network/Your_first_neural_network.ipynb
mit
%matplotlib inline %config InlineBackend.figure_format = 'retina' import numpy as np import pandas as pd import matplotlib.pyplot as plt """ Explanation: Your first neural network In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more. End of explanation """ data_path = 'Bike-Sharing-Dataset/hour.csv' rides = pd.read_csv(data_path) rides.head() """ Explanation: Load and prepare the data A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon! End of explanation """ rides[:24*10].plot(x='dteday', y='cnt') """ Explanation: Checking out the data This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above. Below is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model. 
End of explanation """ dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday'] for each in dummy_fields: dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False) rides = pd.concat([rides, dummies], axis=1) fields_to_drop = ['instant', 'dteday', 'season', 'weathersit', 'weekday', 'atemp', 'mnth', 'workingday', 'hr'] data = rides.drop(fields_to_drop, axis=1) data.head() """ Explanation: Dummy variables Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies(). End of explanation """ quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed'] # Store scalings in a dictionary so we can convert back later scaled_features = {} for each in quant_features: mean, std = data[each].mean(), data[each].std() scaled_features[each] = [mean, std] data.loc[:, each] = (data[each] - mean)/std """ Explanation: Scaling target variables To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1. The scaling factors are saved so we can go backwards when we use the network for predictions. End of explanation """ # Save data for approximately the last 21 days test_data = data[-21*24:] # Now remove the test data from the data set data = data[:-21*24] # Separate the data into features and targets target_fields = ['cnt', 'casual', 'registered'] features, targets = data.drop(target_fields, axis=1), data[target_fields] test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields] """ Explanation: Splitting the data into training, testing, and validation sets We'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders. 
End of explanation """ # Hold out the last 60 days or so of the remaining data as a validation set train_features, train_targets = features[:-60*24], targets[:-60*24] val_features, val_targets = features[-60*24:], targets[-60*24:] """ Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set). End of explanation """ class NeuralNetwork(object): def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Initialize weights self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5, (self.input_nodes, self.hidden_nodes)) self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) self.lr = learning_rate #### TODO: Set self.activation_function to your implemented sigmoid function #### # # Note: in Python, you can define a function with a lambda expression, # as shown below. self.activation_function = lambda x : 0 # Replace 0 with your sigmoid calculation. ### If the lambda code above is not something you're familiar with, # You can uncomment out the following three lines and put your # implementation there instead. # #def sigmoid(x): # return 0 # Replace 0 with your sigmoid calculation here #self.activation_function = sigmoid def train(self, features, targets): ''' Train the network on batch of features and targets. 
Arguments --------- features: 2D array, each row is one data record, each column is a feature targets: 1D array of target values ''' n_records = features.shape[0] delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape) delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape) for X, y in zip(features, targets): #### Implement the forward pass here #### ### Forward pass ### # TODO: Hidden layer - Replace these values with your calculations. hidden_inputs = None # signals into hidden layer hidden_outputs = None # signals from hidden layer # TODO: Output layer - Replace these values with your calculations. final_inputs = None # signals into final output layer final_outputs = None # signals from final output layer #### Implement the backward pass here #### ### Backward pass ### # TODO: Output error - Replace this value with your calculations. error = None # Output layer error is the difference between desired target and actual output. # TODO: Calculate the hidden layer's contribution to the error hidden_error = None # TODO: Backpropagated error terms - Replace these values with your calculations. output_error_term = None hidden_error_term = None # Weight step (input to hidden) delta_weights_i_h += None # Weight step (hidden to output) delta_weights_h_o += None # TODO: Update the weights - Replace these values with your calculations. self.weights_hidden_to_output += None # update hidden-to-output weights with gradient descent step self.weights_input_to_hidden += None # update input-to-hidden weights with gradient descent step def run(self, features): ''' Run a forward pass through the network with input features Arguments --------- features: 1D array of feature values ''' #### Implement the forward pass here #### # TODO: Hidden layer - replace these values with the appropriate calculations. 
hidden_inputs = None # signals into hidden layer hidden_outputs = None # signals from hidden layer # TODO: Output layer - Replace these values with the appropriate calculations. final_inputs = None # signals into final output layer final_outputs = None # signals from final output layer return final_outputs """ Explanation: OPTIONAL: Time to build the network Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes. <img src="assets/neural_network.png" width=300px> The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation. We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation. Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$. Below, you have these tasks: 1. Implement the sigmoid function to use as the activation function. 
Set self.activation_function in __init__ to your sigmoid function. 2. Implement the forward pass in the train method. 3. Implement the backpropagation algorithm in the train method, including calculating the output error. 4. Implement the forward pass in the run method. End of explanation """ import unittest inputs = [0.5, -0.2, 0.1] targets = [0.4] test_w_i_h = np.array([[0.1, 0.4, -0.3], [-0.2, 0.5, 0.2]]) test_w_h_o = np.array([[0.3, -0.1]]) class TestMethods(unittest.TestCase): ########## # Unit tests for data loading ########## def test_data_path(self): # Test that file path to dataset has been unaltered self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv') def test_data_loaded(self): # Test that data frame loaded self.assertTrue(isinstance(rides, pd.DataFrame)) ########## # Unit tests for network functionality ########## def test_activation(self): network = NeuralNetwork(3, 2, 1, 0.5) # Test that the activation function is a sigmoid self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5)))) def test_train(self): # Test that weights are updated correctly on training network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() network.train(inputs, targets) self.assertTrue(np.allclose(network.weights_hidden_to_output, np.array([[ 0.37275328, -0.03172939]]))) self.assertTrue(np.allclose(network.weights_input_to_hidden, np.array([[ 0.10562014, 0.39775194, -0.29887597], [-0.20185996, 0.50074398, 0.19962801]]))) def test_run(self): # Test correctness of run method network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() self.assertTrue(np.allclose(network.run(inputs), 0.09998924)) suite = unittest.TestLoader().loadTestsFromModule(TestMethods()) unittest.TextTestRunner().run(suite) """ Explanation: OPTIONAL: Unit tests Run these unit tests to check the correctness 
of your network implementation. This will help you be sure your network was implemented correctly befor you starting trying to train it. These tests must all be successful to pass the project. End of explanation """ def MSE(y, Y): return np.mean((y-Y)**2) #Delete the following line if you have successfully implemented the NeuralNetowrk in the optional session from NN import NeuralNetwork import sys ### Set the hyperparameters here ### iterations = 5000 learning_rate = 0.1 hidden_nodes = 16 output_nodes = 1 N_i = train_features.shape[1] network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate) losses = {'train':[], 'validation':[]} for e in range(iterations): if (e > 500): network.lr = 0.01 if (e> 2000): network.lr = 0.001 # Go through a random batch of 128 records from the training data set batch = np.random.choice(train_features.index, size=128) for record, target in zip(train_features.ix[batch].values, train_targets.ix[batch]['cnt']): network.train(record, target) # Printing out the training progress train_loss = MSE(network.run(train_features), train_targets['cnt'].values) val_loss = MSE(network.run(val_features), val_targets['cnt'].values) sys.stdout.write("\rProgress: " + str(100 * e/float(iterations))[:4] \ + "% ... Training loss: " + str(train_loss)[:5] \ + " ... Validation loss: " + str(val_loss)[:5]) losses['train'].append(train_loss) losses['validation'].append(val_loss) plt.plot(losses['train'], label='Training loss') plt.plot(losses['validation'], label='Validation loss') plt.legend() _ = plt.ylim() """ Explanation: Training the network Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. 
That is, the loss on the validation set will start increasing as the training set loss drops. You'll also be using a method known as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later. Choose the number of iterations This is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, then the model will not generalize well to other data; this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase. Choose the learning rate This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge. Choose the number of hidden nodes The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose. 
End of explanation """ fig, ax = plt.subplots(figsize=(8,4)) mean, std = scaled_features['cnt'] predictions = network.run(test_features)*std + mean ax.plot(predictions[0], label='Prediction') ax.plot((test_targets['cnt']*std + mean).values, label='Data') ax.set_xlim(right=len(predictions)) ax.legend() dates = pd.to_datetime(rides.ix[test_data.index]['dteday']) dates = dates.apply(lambda d: d.strftime('%b %d')) ax.set_xticks(np.arange(len(dates))[12::24]) _ = ax.set_xticklabels(dates[12::24], rotation=45) """ Explanation: Check out your predictions Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly. End of explanation """
postBG/DL_project
weight-initialization/weight_initialization.ipynb
mit
%matplotlib inline import tensorflow as tf import helper from tensorflow.examples.tutorials.mnist import input_data print('Getting MNIST Dataset...') mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) print('Data Extracted.') """ Explanation: Weight Initialization In this lesson, you'll learn how to find good initial weights for a neural network. Having good initial weights can place the neural network close to the optimal solution. This allows the neural network to come to the best solution quicker. Testing Weights Dataset To see how different weights perform, we'll test on the same dataset and neural network. Let's go over the dataset and neural network. We'll be using the MNIST dataset to demonstrate the different initial weights. As a reminder, the MNIST dataset contains images of handwritten numbers, 0-9, with normalized input (0.0 - 1.0). Run the cell below to download and load the MNIST dataset. End of explanation """ # Save the shapes of weights for each layer layer_1_weight_shape = (mnist.train.images.shape[1], 256) layer_2_weight_shape = (256, 128) layer_3_weight_shape = (128, mnist.train.labels.shape[1]) """ Explanation: Neural Network <img style="float: left" src="images/neural_network.png"/> For the neural network, we'll test on a 3 layer neural network with ReLU activations and an Adam optimizer. The lessons you learn apply to other neural networks, including different activations and optimizers. End of explanation """ all_zero_weights = [ tf.Variable(tf.zeros(layer_1_weight_shape)), tf.Variable(tf.zeros(layer_2_weight_shape)), tf.Variable(tf.zeros(layer_3_weight_shape)) ] all_one_weights = [ tf.Variable(tf.ones(layer_1_weight_shape)), tf.Variable(tf.ones(layer_2_weight_shape)), tf.Variable(tf.ones(layer_3_weight_shape)) ] helper.compare_init_weights( mnist, 'All Zeros vs All Ones', [ (all_zero_weights, 'All Zeros'), (all_one_weights, 'All Ones')]) """ Explanation: Initialize Weights Let's start looking at some initial weights. 
All Zeros or Ones If you follow the principle of Occam's razor, you might think setting all the weights to 0 or 1 would be the best solution. This is not the case. With every weight the same, all the neurons at each layer are producing the same output. This makes it hard to decide which weights to adjust. Let's compare the loss with all ones and all zero weights using helper.compare_init_weights. This function will run two different initial weights on the neural network above for 2 epochs. It will plot the loss for the first 100 batches and print out stats after the 2 epochs (~860 batches). We plot the first 100 batches to better judge which weights performed better at the start. Run the cell below to see the difference between weights of all zeros against all ones. End of explanation """ helper.hist_dist('Random Uniform (minval=-3, maxval=3)', tf.random_uniform([1000], -3, 3)) """ Explanation: As you can see the accuracy is close to guessing for both zeros and ones, around 10%. The neural network is having a hard time determining which weights need to be changed, since the neurons have the same output for each layer. To avoid neurons with the same output, let's use unique weights. We can also randomly select these weights to avoid being stuck in a local minimum for each run. A good solution for getting these random weights is to sample from a uniform distribution. Uniform Distribution A [uniform distribution](https://en.wikipedia.org/wiki/Uniform_distribution_(continuous%29) has the equal probability of picking any number from a set of numbers. We'll be picking from a continous distribution, so the chance of picking the same number is low. We'll use TensorFlow's tf.random_uniform function to pick random numbers from a uniform distribution. tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None) Outputs random values from a uniform distribution. The generated values follow a uniform distribution in the range [minval, maxval). 
The lower bound minval is included in the range, while the upper bound maxval is excluded. shape: A 1-D integer Tensor or Python array. The shape of the output tensor. minval: A 0-D Tensor or Python value of type dtype. The lower bound on the range of random values to generate. Defaults to 0. maxval: A 0-D Tensor or Python value of type dtype. The upper bound on the range of random values to generate. Defaults to 1 if dtype is floating point. dtype: The type of the output: float32, float64, int32, or int64. seed: A Python integer. Used to create a random seed for the distribution. See tf.set_random_seed for behavior. name: A name for the operation (optional). We can visualize the uniform distribution by using a histogram. Let's map the values from tf.random_uniform([1000], -3, 3) to a histogram using the helper.hist_dist function. This will be 1000 random float values from -3 to 3, excluding the value 3. End of explanation """ # Default for tf.random_uniform is minval=0 and maxval=1 basline_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape)), tf.Variable(tf.random_uniform(layer_2_weight_shape)), tf.Variable(tf.random_uniform(layer_3_weight_shape)) ] helper.compare_init_weights( mnist, 'Baseline', [(basline_weights, 'tf.random_uniform [0, 1)')]) """ Explanation: The histogram used 500 buckets for the 1000 values. Since the chance for any single bucket is the same, there should be around 2 values for each bucket. That's exactly what we see with the histogram. Some buckets have more and some have less, but they trend around 2. Now that you understand the tf.random_uniform function, let's apply it to some initial weights. Baseline Let's see how well the neural network trains using the default values for tf.random_uniform, where minval=0.0 and maxval=1.0. 
End of explanation """ uniform_neg1to1_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -1, 1)), tf.Variable(tf.random_uniform(layer_2_weight_shape, -1, 1)), tf.Variable(tf.random_uniform(layer_3_weight_shape, -1, 1)) ] helper.compare_init_weights( mnist, '[0, 1) vs [-1, 1)', [ (basline_weights, 'tf.random_uniform [0, 1)'), (uniform_neg1to1_weights, 'tf.random_uniform [-1, 1)')]) """ Explanation: The loss graph is showing the neural network is learning, which it didn't with all zeros or all ones. We're headed in the right direction. General rule for setting weights The general rule for setting the weights in a neural network is to be close to zero without being too small. A good pracitce is to start your weights in the range of $[-y, y]$ where $y=1/\sqrt{n}$ ($n$ is the number of inputs to a given neuron). Let's see if this holds true, let's first center our range over zero. This will give us the range [-1, 1). End of explanation """ uniform_neg01to01_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.1, 0.1)), tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.1, 0.1)), tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.1, 0.1)) ] uniform_neg001to001_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.01, 0.01)), tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.01, 0.01)), tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.01, 0.01)) ] uniform_neg0001to0001_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.001, 0.001)), tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.001, 0.001)), tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.001, 0.001)) ] helper.compare_init_weights( mnist, '[-1, 1) vs [-0.1, 0.1) vs [-0.01, 0.01) vs [-0.001, 0.001)', [ (uniform_neg1to1_weights, '[-1, 1)'), (uniform_neg01to01_weights, '[-0.1, 0.1)'), (uniform_neg001to001_weights, '[-0.01, 0.01)'), (uniform_neg0001to0001_weights, '[-0.001, 0.001)')], plot_n_batches=None) """ Explanation: We're going 
in the right direction, the accuracy and loss is better with [-1, 1). We still want smaller weights. How far can we go before it's too small? Too small Let's compare [-0.1, 0.1), [-0.01, 0.01), and [-0.001, 0.001) to see how small is too small. We'll also set plot_n_batches=None to show all the batches in the plot. End of explanation """ import numpy as np general_rule_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -1/np.sqrt(layer_1_weight_shape[0]), 1/np.sqrt(layer_1_weight_shape[0]))), tf.Variable(tf.random_uniform(layer_2_weight_shape, -1/np.sqrt(layer_2_weight_shape[0]), 1/np.sqrt(layer_2_weight_shape[0]))), tf.Variable(tf.random_uniform(layer_3_weight_shape, -1/np.sqrt(layer_3_weight_shape[0]), 1/np.sqrt(layer_3_weight_shape[0]))) ] helper.compare_init_weights( mnist, '[-0.1, 0.1) vs General Rule', [ (uniform_neg01to01_weights, '[-0.1, 0.1)'), (general_rule_weights, 'General Rule')], plot_n_batches=None) """ Explanation: Looks like anything [-0.01, 0.01) or smaller is too small. Let's compare this to our typical rule of using the range $y=1/\sqrt{n}$. End of explanation """ helper.hist_dist('Random Normal (mean=0.0, stddev=1.0)', tf.random_normal([1000])) """ Explanation: The range we found and $y=1/\sqrt{n}$ are really close. Since the uniform distribution has the same chance to pick anything in the range, what if we used a distribution that had a higher chance of picking numbers closer to 0. Let's look at the normal distribution. Normal Distribution Unlike the uniform distribution, the normal distribution has a higher likelihood of picking number close to it's mean. To visualize it, let's plot values from TensorFlow's tf.random_normal function to a histogram. tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None) Outputs random values from a normal distribution. shape: A 1-D integer Tensor or Python array. The shape of the output tensor. mean: A 0-D Tensor or Python value of type dtype. 
The mean of the normal distribution. stddev: A 0-D Tensor or Python value of type dtype. The standard deviation of the normal distribution. dtype: The type of the output. seed: A Python integer. Used to create a random seed for the distribution. See tf.set_random_seed for behavior. name: A name for the operation (optional). End of explanation """ normal_01_weights = [ tf.Variable(tf.random_normal(layer_1_weight_shape, stddev=0.1)), tf.Variable(tf.random_normal(layer_2_weight_shape, stddev=0.1)), tf.Variable(tf.random_normal(layer_3_weight_shape, stddev=0.1)) ] helper.compare_init_weights( mnist, 'Uniform [-0.1, 0.1) vs Normal stddev 0.1', [ (uniform_neg01to01_weights, 'Uniform [-0.1, 0.1)'), (normal_01_weights, 'Normal stddev 0.1')]) """ Explanation: Let's compare the normal distribution against the previous uniform distribution. End of explanation """ helper.hist_dist('Truncated Normal (mean=0.0, stddev=1.0)', tf.truncated_normal([1000])) """ Explanation: The normal distribution gave a slight increasse in accuracy and loss. Let's move closer to 0 and drop picked numbers that are x number of standard deviations away. This distribution is called Truncated Normal Distribution. Truncated Normal Distribution tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None) Outputs random values from a truncated normal distribution. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. shape: A 1-D integer Tensor or Python array. The shape of the output tensor. mean: A 0-D Tensor or Python value of type dtype. The mean of the truncated normal distribution. stddev: A 0-D Tensor or Python value of type dtype. The standard deviation of the truncated normal distribution. dtype: The type of the output. seed: A Python integer. Used to create a random seed for the distribution. 
See tf.set_random_seed for behavior. name: A name for the operation (optional). End of explanation """ trunc_normal_01_weights = [ tf.Variable(tf.truncated_normal(layer_1_weight_shape, stddev=0.1)), tf.Variable(tf.truncated_normal(layer_2_weight_shape, stddev=0.1)), tf.Variable(tf.truncated_normal(layer_3_weight_shape, stddev=0.1)) ] helper.compare_init_weights( mnist, 'Normal vs Truncated Normal', [ (normal_01_weights, 'Normal'), (trunc_normal_01_weights, 'Truncated Normal')]) """ Explanation: Again, let's compare the previous results with the previous distribution. End of explanation """ helper.compare_init_weights( mnist, 'Baseline vs Truncated Normal', [ (basline_weights, 'Baseline'), (trunc_normal_01_weights, 'Truncated Normal')]) """ Explanation: There's no difference between the two, but that's because the neural network we're using is too small. A larger neural network will pick more points on the normal distribution, increasing the likelihood it's choices are larger than 2 standard deviations. We've come a long way from the first set of weights we tested. Let's see the difference between the weights we used then and now. End of explanation """
ledrui/Regression
week3/week-3-polynomial-regression-assignment-blank.ipynb
mit
import graphlab """ Explanation: Regression Week 3: Assessing Fit (polynomial regression) In this notebook you will compare different regression models in order to assess which model fits best. We will be using polynomial regression as a means to examine this topic. In particular you will: * Write a function to take an SArray and a degree and return an SFrame where each column is the SArray to a polynomial value up to the total degree e.g. degree = 3 then column 1 is the SArray column 2 is the SArray squared and column 3 is the SArray cubed * Use matplotlib to visualize polynomial regressions * Use matplotlib to visualize the same polynomial degree on different subsets of the data * Use a validation set to select a polynomial degree * Assess the final fit using test data We will continue to use the House data from previous notebooks. Fire up graphlab create End of explanation """ tmp = graphlab.SArray([1., 2., 3.]) tmp_cubed = tmp.apply(lambda x: x**3) print tmp print tmp_cubed """ Explanation: Next we're going to write a polynomial function that takes an SArray and a maximal degree and returns an SFrame with columns containing the SArray to all the powers up to the maximal degree. The easiest way to apply a power to an SArray is to use the .apply() and lambda x: functions. For example to take the example array and compute the third power we can do as follows: (note running this cell the first time may take longer than expected since it loads graphlab) End of explanation """ ex_sframe = graphlab.SFrame() ex_sframe['power_1'] = tmp print ex_sframe """ Explanation: We can create an empty SFrame using graphlab.SFrame() and then add any columns to it with ex_sframe['column_name'] = value. For example we create an empty SFrame and make the column 'power_1' to be the first power of tmp (i.e. tmp itself). 
End of explanation """ def polynomial_sframe(feature, degree): # assume that degree >= 1 # initialize the SFrame: poly_sframe = graphlab.SFrame() # and set poly_sframe['power_1'] equal to the passed feature poly_sframe['power_1'] = feature # first check if degree > 1 if degree > 1: # then loop over the remaining degrees: # range usually starts at 0 and stops at the endpoint-1. We want it to start at 2 and stop at degree for power in range(2, degree+1): # first we'll give the column a name: name = 'power_' + str(power) # then assign poly_sframe[name] to the appropriate power of feature poly_sframe[name] = feature**power return poly_sframe """ Explanation: Polynomial_sframe function Using the hints above complete the following function to create an SFrame consisting of the powers of an SArray up to a specific degree: End of explanation """ print polynomial_sframe(tmp, 3) """ Explanation: To test your function consider the smaller tmp variable and what you would expect the outcome of the following call: End of explanation """ sales = graphlab.SFrame('kc_house_data.gl/') """ Explanation: Visualizing polynomial regression Let's use matplotlib to visualize what a polynomial regression looks like on some real data. End of explanation """ sales = sales.sort(['sqft_living', 'price']) """ Explanation: As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices. End of explanation """ poly1_data = polynomial_sframe(sales['sqft_living'], 1) poly1_data['price'] = sales['price'] # add price to the data since it's the target """ Explanation: Let's start with a degree 1 polynomial using 'sqft_living' (i.e. a line) to predict 'price' and plot what it looks like. 
End of explanation """ model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = ['power_1'], validation_set = None) #let's take a look at the weights before we plot model1.get("coefficients") import matplotlib.pyplot as plt %matplotlib inline plt.plot(poly1_data['power_1'],poly1_data['price'],'.', poly1_data['power_1'], model1.predict(poly1_data),'-') """ Explanation: NOTE: for all the models in this notebook use validation_set = None to ensure that all results are consistent across users. End of explanation """ poly2_data = polynomial_sframe(sales['sqft_living'], 2) my_features = poly2_data.column_names() # get the name of the features poly2_data['price'] = sales['price'] # add price to the data since it's the target model2 = graphlab.linear_regression.create(poly2_data, target = 'price', features = my_features, validation_set = None) model2.get("coefficients") plt.plot(poly2_data['power_1'],poly2_data['price'],'.', poly2_data['power_1'], model2.predict(poly2_data),'-') """ Explanation: Let's unpack that plt.plot() command. The first pair of SArrays we passed are the 1st power of sqft and the actual price we then ask it to print these as dots '.'. The next pair we pass is the 1st power of sqft and the predicted values from the linear model. We ask these to be plotted as a line '-'. We can see, not surprisingly, that the predicted values all fall on a line, specifically the one with slope 280 and intercept -43579. What if we wanted to plot a second degree polynomial? 
End of explanation """ poly3_data = polynomial_sframe(sales['sqft_living'], 3) my_features = poly3_data.column_names() # get the name of the features poly3_data['price'] = sales['price'] # add price to the data since it's the target model3 = graphlab.linear_regression.create(poly3_data, target = 'price', features = my_features, validation_set = None) plt.plot(poly3_data['power_1'],poly3_data['price'],'.', poly3_data['power_1'], model3.predict(poly3_data),'-') """ Explanation: The resulting model looks like half a parabola. Try on your own to see what the cubic looks like: End of explanation """ poly15_data = polynomial_sframe(sales['sqft_living'], 15) my_features = poly15_data.column_names() # get the name of the features poly15_data['price'] = sales['price'] # add price to the data since it's the target model15 = graphlab.linear_regression.create(poly15_data, target = 'price', features = my_features, validation_set = None) plt.plot(poly15_data['power_1'],poly15_data['price'],'.', poly15_data['power_1'], model15.predict(poly15_data),'-') """ Explanation: Now try a 15th degree polynomial: End of explanation """ (temp_1, temp_2) = sales.random_split(0.5, seed=0) (set_1, set_2) = temp_1.random_split(0.5, seed=0) (set_3, set_4) = temp_2.random_split(0.5, seed=0) """ Explanation: What do you think of the 15th degree polynomial? Do you think this is appropriate? If we were to change the data do you think you'd get pretty much the same curve? Let's take a look. Changing the data and re-learning We're going to split the sales data into four subsets of roughly equal size. Then you will estimate a 15th degree polynomial model on all four subsets of the data. Print the coefficients (you should use .print_rows(num_rows = 16) to view all of them) and plot the resulting fit (as we did above). The quiz will ask you some questions about these results. 
To split the sales data into four subsets, we perform the following steps: * First split sales into 2 subsets with .random_split(0.5, seed=0). * Next split the resulting subsets into 2 more subsets each. Use .random_split(0.5, seed=0). We set seed=0 in these steps so that different users get consistent results. You should end up with 4 subsets (set_1, set_2, set_3, set_4) of approximately equal size. End of explanation """ # Set_1 poly15_set_1_data = polynomial_sframe(set_1['sqft_living'], 15) my_features = poly15_set_1_data.column_names() # get the name of the features poly15_set_1_data['price'] = set_1['price'] # add price to the data since it's the target model_set_1 = graphlab.linear_regression.create(poly15_set_1_data, target = 'price', features = my_features, validation_set = None) model_set_1.get('coefficients').print_rows(num_rows=16) # Ploting plt.plot(poly15_set_1_data['power_1'],poly15_set_1_data['price'],'.', poly15_set_1_data['power_1'], model_set_1.predict(poly15_set_1_data),'-') # Set_2 poly15_set_2_data = polynomial_sframe(set_2['sqft_living'], 15) my_features = poly15_set_2_data.column_names() # get the name of the features poly15_set_2_data['price'] = set_2['price'] # add price to the data since it's the target model_set_2 = graphlab.linear_regression.create(poly15_set_2_data, target = 'price', features = my_features, validation_set = None) model_set_2.get('coefficients').print_rows(num_rows=16) # Ploting plt.plot(poly15_set_2_data['power_1'],poly15_set_2_data['price'],'.', poly15_set_2_data['power_1'], model_set_2.predict(poly15_set_2_data),'-') # Set_3 poly15_set_3_data = polynomial_sframe(set_3['sqft_living'], 15) my_features = poly15_set_3_data.column_names() # get the name of the features poly15_set_3_data['price'] = set_3['price'] # add price to the data since it's the target model_set_3 = graphlab.linear_regression.create(poly15_set_3_data, target = 'price', features = my_features, validation_set = None) 
model_set_3.get('coefficients').print_rows(num_rows=16) # Ploting plt.plot(poly15_set_3_data['power_1'],poly15_set_3_data['price'],'.', poly15_set_3_data['power_1'], model_set_3.predict(poly15_set_3_data),'-') # Set_4 poly15_set_4_data = polynomial_sframe(set_4['sqft_living'], 15) my_features = poly15_set_4_data.column_names() # get the name of the features poly15_set_4_data['price'] = set_4['price'] # add price to the data since it's the target model_set_4 = graphlab.linear_regression.create(poly15_set_4_data, target = 'price', features = my_features, validation_set = None) model_set_4.get('coefficients').print_rows(num_rows=16) # Ploting plt.plot(poly15_set_4_data['power_1'],poly15_set_4_data['price'],'.', poly15_set_4_data['power_1'], model_set_4.predict(poly15_set_4_data),'-') """ Explanation: Fit a 15th degree polynomial on set_1, set_2, set_3, and set_4 using sqft_living to predict prices. Print the coefficients and make a plot of the resulting model. End of explanation """ (training_and_validation, testing) = sales.random_split(0.9, seed=1) (training, validation) = training_and_validation.random_split(0.5, seed=1) """ Explanation: Some questions you will be asked on your quiz: Quiz Question: Is the sign (positive or negative) for power_15 the same in all four models? Quiz Question: (True/False) the plotted fitted lines look the same in all four plots Selecting a Polynomial Degree Whenever we have a "magic" parameter like the degree of the polynomial there is one well-known way to select these parameters: validation set. (We will explore another approach in week 4). We split the sales dataset 3-way into training set, test set, and validation set as follows: Split our sales data into 2 sets: training_and_validation and testing. Use random_split(0.9, seed=1). Further split our training data into two sets: training and validation. Use random_split(0.5, seed=1). Again, we set seed=1 to obtain consistent results for different users. 
End of explanation """ for degree in xrange(1, 16): poly_data = polynomial_sframe(training['sqft_living'], degree) my_features = poly_data.column_names() # get the name of the features poly_data['price'] = training['price'] # add price to the data since it's the model = graphlab.linear_regression.create(poly_data, target = 'price', features = my_features, validation_set = None, verbose=False) validation_data = polynomial_sframe(validation['sqft_living'], degree) validation_data['price'] = validation['price'] residual =validation['price'] - model.predict(validation_data) rs = residual.apply(lambda x: x**2) rss = rs.sum() print "RSS ", degree, rss """ Explanation: Next you should write a loop that does the following: * For degree in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] (to get this in python type range(1, 15+1)) * Build an SFrame of polynomial data of train_data['sqft_living'] at the current degree * hint: my_features = poly_data.column_names() gives you a list e.g. ['power_1', 'power_2', 'power_3'] which you might find useful for graphlab.linear_regression.create( features = my_features) * Add train_data['price'] to the polynomial SFrame * Learn a polynomial regression model to sqft vs price with that degree on TRAIN data * Compute the RSS on VALIDATION data (here you will want to use .predict()) for that degree and you will need to make a polynmial SFrame using validation data. 
* Report which degree had the lowest RSS on validation data (remember python indexes from 0) (Note you can turn off the print out of linear_regression.create() with verbose = False) End of explanation """ # RSS on test data poly_data_selected = polynomial_sframe(training['sqft_living'], 6) my_features = poly_data_selected.column_names() # get the name of the features poly_data_selected['price'] = training['price'] # add price to the data since it's the model_selected = graphlab.linear_regression.create(poly_data_selected, target = 'price', features = my_features, validation_set = None, verbose=False) test_data = polynomial_sframe(testing['sqft_living'], 6) test_data['price'] = testing['price'] residual_1 = testing['price'] - model.predict(test_data) rs = residual_1.apply(lambda x: x**2) rss = rs.sum() print "RSS ", rss """ Explanation: Quiz Question: Which degree (1, 2, …, 15) had the lowest RSS on Validation data? Now that you have chosen the degree of your polynomial using validation data, compute the RSS of this model on TEST data. Report the RSS on your quiz. End of explanation """ for i in range(1,15+1): current_degree = polynomial_sframe(training['sqft_living'], i) my_features = current_degree.column_names() # get the name of the features current_degree['price'] = training['price'] # add price to the data since it's the target model = graphlab.linear_regression.create(current_degree, target = 'price', features = my_features, validation_set = None,verbose = False) ##Compute the RSS on VALIDATION data validation_data = polynomial_sframe(validation['sqft_living'], i) predictions = model.predict(validation_data) residuals = validation['price'] - predictions rss_1 = sum(residuals * residuals) print i print rss_1 """ Explanation: Quiz Question: what is the RSS on TEST data for the model with the degree selected from Validation data? (Make sure you got the correct degree from the previous question) Testing other routines End of explanation """
tuanavu/coursera-university-of-washington
machine_learning/2_regression/assignment/week1/week-1-simple-regression-assignment-blank.ipynb
mit
import graphlab """ Explanation: Regression Week 1: Simple Linear Regression In this notebook we will use data on house sales in King County to predict house prices using simple (one input) linear regression. You will: * Use graphlab SArray and SFrame functions to compute important summary statistics * Write a function to compute the Simple Linear Regression weights using the closed form solution * Write a function to make predictions of the output given the input feature * Turn the regression around to predict the input given the output * Compare two different models for predicting house prices In this notebook you will be provided with some already complete code as well as some code that you should complete yourself in order to answer quiz questions. The code we provide to complte is optional and is there to assist you with solving the problems but feel free to ignore the helper code and write your own. Fire up graphlab create End of explanation """ sales = graphlab.SFrame('kc_house_data.gl/') """ Explanation: Load house sales data Dataset is from house sales in King County, the region where the city of Seattle, WA is located. End of explanation """ train_data,test_data = sales.random_split(.8,seed=0) """ Explanation: Split data into training and testing We use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you). End of explanation """ # Let's compute the mean of the House Prices in King County in 2 different ways. 
prices = sales['price'] # extract the price column of the sales SFrame -- this is now an SArray # recall that the arithmetic average (the mean) is the sum of the prices divided by the total number of houses: sum_prices = prices.sum() num_houses = prices.size() # when prices is an SArray .size() returns its length avg_price_1 = sum_prices/num_houses avg_price_2 = prices.mean() # if you just want the average, the .mean() function print "average price via method 1: " + str(avg_price_1) print "average price via method 2: " + str(avg_price_2) """ Explanation: Useful SFrame summary functions In order to make use of the closed form soltion as well as take advantage of graphlab's built in functions we will review some important ones. In particular: * Computing the sum of an SArray * Computing the arithmetic average (mean) of an SArray * multiplying SArrays by constants * multiplying SArrays by other SArrays End of explanation """ # if we want to multiply every price by 0.5 it's a simple as: half_prices = 0.5*prices # Let's compute the sum of squares of price. We can multiply two SArrays of the same length elementwise also with * prices_squared = prices*prices sum_prices_squared = prices_squared.sum() # price_squared is an SArray of the squares and we want to add them up. print "the sum of price squared is: " + str(sum_prices_squared) """ Explanation: As we see we get the same answer both ways End of explanation """ def simple_linear_regression(input_feature, output): # compute the mean of input_feature and output # compute the product of the output and the input_feature and its mean # compute the squared value of the input_feature and its mean # use the formula for the slope # use the formula for the intercept return (intercept, slope) """ Explanation: Aside: The python notation x.xxe+yy means x.xx * 10^(yy). 
e.g 100 = 10^2 = 1*10^2 = 1e2 Build a generic simple linear regression function Armed with these SArray functions we can use the closed form solution found from lecture to compute the slope and intercept for a simple linear regression on observations stored as SArrays: input_feature, output. Complete the following function (or write your own) to compute the simple linear regression slope and intercept: End of explanation """ test_feature = graphlab.SArray(range(5)) test_output = graphlab.SArray(1 + 1*test_feature) (test_intercept, test_slope) = simple_linear_regression(test_feature, test_output) print "Intercept: " + str(test_intercept) print "Slope: " + str(test_slope) """ Explanation: We can test that our function works by passing it something where we know the answer. In particular we can generate a feature and then put the output exactly on a line: output = 1 + 1*input_feature then we know both our slope and intercept should be 1 End of explanation """ sqft_intercept, sqft_slope = simple_linear_regression(train_data['sqft_living'], train_data['price']) print "Intercept: " + str(sqft_intercept) print "Slope: " + str(sqft_slope) """ Explanation: Now that we know it works let's build a regression model for predicting price based on sqft_living. Rembember that we train on train_data! End of explanation """ def get_regression_predictions(input_feature, intercept, slope): # calculate the predicted values: return predicted_values """ Explanation: Predicting Values Now that we have the model parameters: intercept & slope we can make predictions. Using SArrays it's easy to multiply an SArray by a constant and add a constant value. 
Complete the following function to return the predicted output given the input_feature, slope and intercept: End of explanation """ my_house_sqft = 2650 estimated_price = get_regression_predictions(my_house_sqft, sqft_intercept, sqft_slope) print "The estimated price for a house with %d squarefeet is $%.2f" % (my_house_sqft, estimated_price) """ Explanation: Now that we can calculate a prediction given the slop and intercept let's make a prediction. Use (or alter) the following to find out the estimated price for a house with 2650 squarefeet according to the squarefeet model we estiamted above. Quiz Question: Using your Slope and Intercept from (4), What is the predicted price for a house with 2650 sqft? End of explanation """ def get_residual_sum_of_squares(input_feature, output, intercept, slope): # First get the predictions # then compute the residuals (since we are squaring it doesn't matter which order you subtract) # square the residuals and add them up return(RSS) """ Explanation: Residual Sum of Squares Now that we have a model and can make predictions let's evaluate our model using Residual Sum of Squares (RSS). Recall that RSS is the sum of the squares of the residuals and the residuals is just a fancy word for the difference between the predicted output and the true output. Complete the following (or write your own) function to compute the RSS of a simple linear regression model given the input_feature, output, intercept and slope: End of explanation """ print get_residual_sum_of_squares(test_feature, test_output, test_intercept, test_slope) # should be 0.0 """ Explanation: Let's test our get_residual_sum_of_squares function by applying it to the test model where the data lie exactly on a line. Since they lie exactly on a line the residual sum of squares should be zero! 
End of explanation """ rss_prices_on_sqft = get_residual_sum_of_squares(train_data['sqft_living'], train_data['price'], sqft_intercept, sqft_slope) print 'The RSS of predicting Prices based on Square Feet is : ' + str(rss_prices_on_sqft) """ Explanation: Now use your function to calculate the RSS on training data from the squarefeet model calculated above. Quiz Question: According to this function and the slope and intercept from the squarefeet model What is the RSS for the simple linear regression using squarefeet to predict prices on TRAINING data? End of explanation """ def inverse_regression_predictions(output, intercept, slope): # solve output = intercept + slope*input_feature for input_feature. Use this equation to compute the inverse predictions: return estimated_feature """ Explanation: Predict the squarefeet given price What if we want to predict the squarefoot given the price? Since we have an equation y = a + b*x we can solve the function for x. So that if we have the intercept (a) and the slope (b) and the price (y) we can solve for the estimated squarefeet (x). Comlplete the following function to compute the inverse regression estimate, i.e. predict the input_feature given the output! End of explanation """ my_house_price = 800000 estimated_squarefeet = inverse_regression_predictions(my_house_price, sqft_intercept, sqft_slope) print "The estimated squarefeet for a house worth $%.2f is %d" % (my_house_price, estimated_squarefeet) """ Explanation: Now that we have a function to compute the squarefeet given the price from our simple regression model let's see how big we might expect a house that coses $800,000 to be. Quiz Question: According to this function and the regression slope and intercept from (3) what is the estimated square-feet for a house costing $800,000? 
End of explanation """ # Estimate the slope and intercept for predicting 'price' based on 'bedrooms' """ Explanation: New Model: estimate prices from bedrooms We have made one model for predicting house prices using squarefeet, but there are many other features in the sales SFrame. Use your simple linear regression function to estimate the regression parameters from predicting Prices based on number of bedrooms. Use the training data! End of explanation """ # Compute RSS when using bedrooms on TEST data: # Compute RSS when using squarfeet on TEST data: """ Explanation: Test your Linear Regression Algorithm Now we have two models for predicting the price of a house. How do we know which one is better? Calculate the RSS on the TEST data (remember this data wasn't involved in learning the model). Compute the RSS from predicting prices using bedrooms and from predicting prices using squarefeet. Quiz Question: Which model (square feet or bedrooms) has lowest RSS on TEST data? Think about why this might be the case. End of explanation """
rueedlinger/machine-learning-snippets
notebooks/supervised/text_classification/text_classification.ipynb
mit
import re import urllib.request ''' with urllib.request.urlopen('http://www.gutenberg.org/cache/epub/22465/pg22465.txt') as response: txt_german = response.read().decode('utf-8') with urllib.request.urlopen('https://www.gutenberg.org/files/46/46-0.txt') as response: txt_english = response.read().decode('utf-8') with urllib.request.urlopen('http://www.gutenberg.org/cache/epub/16021/pg16021.txt') as response: txt_french = response.read().decode('utf-8') with urllib.request.urlopen('http://www.gutenberg.org/cache/epub/28560/pg28560.txt') as response: txt_dutch = response.read().decode('utf-8') ''' with open('data/pg22465.txt', 'r') as reader: txt_german = reader.read() with open('data/46-0.txt', 'r') as reader: txt_english = reader.read() with open('data/pg16021.txt', 'r') as reader: txt_french = reader.read() with open('data/pg28560.txt', 'r') as reader: txt_dutch = reader.read() def get_markers(txt, begin_pattern, end_pattern): iter = re.finditer(begin_pattern, txt) index_headers = [m.start(0) for m in iter] iter = re.finditer(end_pattern, txt) index_footers = [m.start(0) for m in iter] # return first match return index_headers[0] + len(begin_pattern.replace('\\','')), index_footers[0] def extract_text_tokens(txt, begin_pattern='\*\*\* START OF THIS PROJECT GUTENBERG EBOOK', end_pattern='\*\*\* END OF THIS PROJECT GUTENBERG EBOOK'): header, footer = get_markers(txt, begin_pattern, end_pattern) return txt[header: footer].split() tokens_german = extract_text_tokens(txt_german) tokens_english = extract_text_tokens(txt_english) tokens_french = extract_text_tokens(txt_french) tokens_dutch = extract_text_tokens(txt_dutch) print('tokens (german)', len(tokens_german)) print('tokens (english)', len(tokens_english)) print('tokens (french)', len(tokens_french)) print('tokens (dutch)', len(tokens_dutch)) """ Explanation: Text Classification (scikit-learn) with Naive Bayes In this Machine Learning Snippet we use scikit-learn (http://scikit-learn.org/) and ebooks from Project 
Gutenberg (https://www.gutenberg.org/) to create a text classifier, which can classify German, French, Dutch and English documents. We need one document per language and split the document into smaller chunks to train the classifier. For our snippet we use the following ebooks: - 'A Christmas Carol' by Charles Dickens (English), https://www.gutenberg.org/ebooks/46 - 'Der Weihnachtsabend' by Charles Dickens (German), https://www.gutenberg.org/ebooks/22465 - 'Cantique de Noël' by Charles Dickens (French), https://www.gutenberg.org/ebooks/16021 - 'Een Kerstlied in Proza' by Charles Dickens (Dutch), https://www.gutenberg.org/ebooks/28560 Note: The ebooks are for the use of anyone anywhere at no cost and with almost no restrictions whatsoever. You may copy it, give it away or re-use it under the terms of the Project Gutenberg License included with this eBook or online at www.gutenberg.org Gathering data First let's extract the text without the header and footer from the ebooks and split the text by whitespace in tokens.
End of explanation """ import re def remove_special_chars(x): # remove special characters chars = ['_', '(', ')', '*', '"', '[', ']', '?', '!', ',', '.', '»', '«', ':', ';'] for c in chars: x = x.replace(c, '') # remove numbers x = re.sub('\d', '', x) return x def clean_data(featurs): # strip, remove sepcial characters and numbers tokens = [remove_special_chars(x.strip()) for x in featurs] cleaned = [] # only use words with length > 1 for t in tokens: if len(t) > 1: cleaned.append(t) return cleaned cleaned_tokens_english = clean_data(tokens_english) cleaned_tokens_german = clean_data(tokens_german) cleaned_tokens_french = clean_data(tokens_french) cleaned_tokens_dutch = clean_data(tokens_dutch) print('cleaned tokens (german)', len(cleaned_tokens_german)) print('cleaned tokens (french)', len(cleaned_tokens_french)) print('cleaned tokens (dutch)', len(cleaned_tokens_dutch)) print('cleaned tokens (english)', len(cleaned_tokens_english)) """ Explanation: Data preparation Next we do some data cleaning. This means we remove special characters and numbers. 
End of explanation """ from sklearn.utils import resample max_tokens = 20 max_samples = 1300 def create_text_sample(x): data = [] text = [] for i, f in enumerate(x): text.append(f) if i % max_tokens == 0 and i != 0: data.append(' '.join(text)) text = [] return data sample_german = resample(create_text_sample(cleaned_tokens_german), replace=False, n_samples=max_samples) sample_french = resample(create_text_sample(cleaned_tokens_french), replace=False, n_samples=max_samples) sample_dutch = resample(create_text_sample(cleaned_tokens_dutch), replace=False, n_samples=max_samples) sample_english = resample(create_text_sample(cleaned_tokens_english), replace=False, n_samples=max_samples) print('samples (german)', len(sample_german)) print('samples (french)', len(sample_french)) print('samples (dutch)', len(sample_dutch)) print('samples (english)', len(sample_english)) """ Explanation: Now we create for every language 1300 text samples with 20 tokens (words). These samples will later be used to train and test our model. End of explanation """ print('English sample:\n------------------') print(sample_english[0]) print('------------------') """ Explanation: A text sample looks like this. 
End of explanation """ class dotdict(dict): """dot.notation access to dictionary attributes""" __getattr__ = dict.get __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__ def create_data_structure(**kwargs): data = dotdict({'labels':[]}) data.samples = dotdict({'text': [], 'target': []}) label = 0 for name, value in kwargs.items(): data.labels.append(name) for i in value: data.samples.text.append(i) data.samples.target.append(label) label += 1 return data data = create_data_structure(de = sample_german, en = sample_english, fr = sample_french, nl = sample_dutch) print('labels: ', data.labels) print('target (labels encoded): ', set(data.samples.target)) print('samples: ', len(data.samples.text)) """ Explanation: Choosing a model As classifier we use the MultinomialNB classifier with the TfidfVectorizer. First we create the data structure which we will use to train the model. ``` { samples: { text:[], target: [] } labels: [] } ``` End of explanation """ from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(data.samples.text, data.samples.target, test_size=0.40) print('train size (x, y): ', len(x_train), len(y_train)) print('test size (x, y): ', len(x_test), len(y_test)) """ Explanation: Training It's importan that we shuffle and split the data into training (70%) and test set (30%) End of explanation """ from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.pipeline import Pipeline from sklearn.naive_bayes import MultinomialNB stopwords = ['scrooge', 'scrooges', 'bob'] pipeline = Pipeline([('vect', TfidfVectorizer(analyzer='word', min_df=10, lowercase=True, stop_words=stopwords)), ('clf', MultinomialNB(alpha=1.0))]) """ Explanation: We connect all our parts (classifier, etc.) to our Machine Learning Pipeline. So it’s easier and faster to go trough all processing steps to build a model. 
The TfidfVectorizer will use the word analyzer, min document frequency of 10 and convert the text to lowercase. I know we already did a lowercase conversion in the previous step. We also provide some stop words which should be ignored in our model. The MultinomialNB classifier will use the default alpha value 1.0. Here you can play around with the settings. In the next section you see how to evaluate your model. End of explanation """ from sklearn.model_selection import KFold from sklearn import model_selection folds = 5 for scoring in ['f1_weighted', 'accuracy']: scores = model_selection.cross_val_score(pipeline, X=x_train, y=y_train, cv=folds, scoring=scoring) print(scoring) print('scores: %s' % scores ) print(scoring + ': %0.6f (+/- %0.4f)' % (scores.mean(), scores.std() * 2)) print() """ Explanation: Evaluation In this step we will evaluate the performance of our classifier. So we do the following evaluation: - Evaluate the model with k-fold on the training set - Evaluate the final model with the test set First let's evaluate our model with a k-fold cross validation. End of explanation """ from sklearn import metrics text_clf = pipeline.fit(x_train, y_train) predicted = text_clf.predict(x_test) print(metrics.classification_report(y_test, predicted, digits=4)) """ Explanation: Next we build the model and evaluate the result against our test set.
End of explanation """
import numpy as np

# show most informative features
def show_top10(classifier, vectorizer, categories):
    """Print, for every category, the 10 features (words) with the highest
    log-probability under the fitted naive-Bayes classifier."""
    # NOTE(review): `get_feature_names` was removed in scikit-learn 1.2 in
    # favour of `get_feature_names_out` -- confirm the installed version.
    feature_names = np.asarray(vectorizer.get_feature_names())
    for i, category in enumerate(categories):
        # argsort is ascending, so the last 10 indices are the largest values
        top10 = np.argsort(classifier.feature_log_prob_[i])[-10:]
        print("%s: %s" % (category, " ".join(feature_names[top10])))

show_top10(text_clf.named_steps['clf'], text_clf.named_steps['vect'], data.labels)
""" Explanation: Examine the features of the model Let's see what are the most informative features End of explanation """
# Inspect the fitted vocabulary of the TfidfVectorizer step.
feature_names = np.asarray(text_clf.named_steps['vect'].get_feature_names())
print('number of features: %d' % len(feature_names))
print('first features: %s'% feature_names[0:10])
print('last features: %s' % feature_names[-10:])
""" Explanation: Let's see which and how many features our model has. End of explanation """
# Short sentences in each of the four trained languages to sanity-check the model.
new_data = ['Hallo mein Name ist Hugo.', 'Hi my name is Hugo.', 'Bonjour mon nom est Hugo.', 'Hallo mijn naam is Hugo.', 'Eins, zwei und drei.', 'One, two and three.', 'Un, deux et trois.', 'Een, twee en drie.' ]
predicted = text_clf.predict(new_data)
probs = text_clf.predict_proba(new_data)
for i, p in enumerate(predicted):
    print(new_data[i], ' --> ', data.labels[p], ', prob:' , probs[i][p])
""" Explanation: New data Let's try out the classifier with the new data. End of explanation """
darioizzo/d-CGP
doc/sphinx/notebooks/symbolic_regression_3.ipynb
gpl-3.0
# Some necessary imports. import dcgpy import pygmo as pg # Sympy is nice to have for basic symbolic manipulation. from sympy import init_printing from sympy.parsing.sympy_parser import * init_printing() # Fundamental for plotting. from matplotlib import pyplot as plt %matplotlib inline """ Explanation: Multi-objective memetic approach In this third tutorial we consider an example with two dimensional input data and we approach its solution using a multi-objective approach where, aside the loss, we consider the formula complexity as a second objective. We will use a memetic approach to learn the model parameters while evolution will shape the model itself. Eventually you will learn: How to instantiate a multi-objective symbolic regression problem. How to use a memetic multi-objective approach to find suitable models for your data End of explanation """ # We load our data from some available ones shipped with dcgpy. # In this particular case we use the problem sinecosine from the paper: # Vladislavleva, Ekaterina J., Guido F. Smits, and Dick Den Hertog. # "Order of nonlinearity as a complexity measure for models generated by symbolic regression via pareto genetic # programming." IEEE Transactions on Evolutionary Computation 13.2 (2008): 333-349. X, Y = dcgpy.generate_sinecosine() from mpl_toolkits.mplot3d import Axes3D # And we plot them as to visualize the problem. fig = plt.figure() ax = fig.add_subplot(111, projection='3d') _ = ax.scatter(X[:,0], X[:,1], Y[:,0]) """ Explanation: 1 - The data End of explanation """ # We define our kernel set, that is the mathematical operators we will # want our final model to possibly contain. What to choose in here is left # to the competence and knowledge of the user. A list of kernels shipped with dcgpy # can be found on the online docs. The user can also define its own kernels (see the corresponding tutorial). 
ss = dcgpy.kernel_set_double(["sum", "diff", "mul", "sin", "cos"]) # We instantiate the symbolic regression optimization problem # Note how we specify to consider one ephemeral constant via # the kwarg n_eph. We also request 100 kernels with a linear # layout (this allows for the construction of longer expressions) and # we set the level back to 101 (in an attempt to skew the search towards # simple expressions) udp = dcgpy.symbolic_regression( points = X, labels = Y, kernels=ss(), rows = 1, cols = 100, n_eph = 1, levels_back = 101, multi_objective=True) prob = pg.problem(udp) print(udp) """ Explanation: 2 - The symbolic regression problem End of explanation """ # We instantiate here the evolutionary strategy we want to use to # search for models. Note we specify we want the evolutionary operators # to be applied also to the constants via the kwarg *learn_constants* uda = dcgpy.momes4cgp(gen = 250, max_mut = 4) algo = pg.algorithm(uda) algo.set_verbosity(10) """ Explanation: 3 - The search algorithm End of explanation """ # We use a population of 100 individuals pop = pg.population(prob, 100) # Here is where we run the actual evolution. Note that the screen output # will show in the terminal (not on your Jupyter notebook in case # you are using it). Note you will have to run this a few times before # solving the problem entirely. pop = algo.evolve(pop) """ Explanation: 4 - The search End of explanation """ # Compute here the non dominated front. ndf = pg.non_dominated_front_2d(pop.get_f()) # Inspect the front and print the proposed expressions. print("{: >20} {: >30}".format("Loss:", "Model:"), "\n") for idx in ndf: x = pop.get_x()[idx] f = pop.get_f()[idx] a = parse_expr(udp.prettier(x))[0] print("{: >20} | {: >30}".format(str(f[0]), str(a)), "|") # Lets have a look to the non dominated fronts in the final population. 
ax = pg.plot_non_dominated_fronts(pop.get_f())
_ = plt.xlabel("loss")
_ = plt.ylabel("complexity")
# Fixed user-facing typo in the figure title ("Non dominate fronts").
_ = plt.title("Non dominated fronts")
""" Explanation: 5 - Inspecting the non dominated front End of explanation """
# Here we get the log of the latest call to the evolve
log = algo.extract(dcgpy.momes4cgp).get_log()
gen = [it[0] for it in log]    # generation number
loss = [it[2] for it in log]   # best loss so far
compl = [it[4] for it in log]  # expression complexity (kept for further plots)
# And here we plot, for example, the generations against the best loss
_ = plt.plot(gen, loss)
_ = plt.title('last call to evolve')
_ = plt.xlabel('generations')
_ = plt.ylabel('loss')
""" Explanation: 6 - Lets have a look to the log content End of explanation """
florianwittkamp/FD_ACOUSTIC
JupyterNotebook/2D/FD_2D_DX4_DT2_fast.ipynb
gpl-3.0
%matplotlib inline import numpy as np import matplotlib.pyplot as plt """ Explanation: FD_2D_DX4_DT2_fast 2-D acoustic Finite-Difference modelling GNU General Public License v3.0 Author: Florian Wittkamp Finite-Difference acoustic seismic wave simulation Discretization of the first-order acoustic wave equation Temporal second-order accuracy $O(\Delta T^2)$ Spatial fourth-order accuracy $O(\Delta X^4)$ Initialisation End of explanation """ # Discretization c1=30 # Number of grid points per dominant wavelength c2=0.2 # CFL-Number nx=300 # Number of grid points in X ny=300 # Number of grid points in Y T=1 # Total propagation time # Source Signal f0= 5 # Center frequency Ricker-wavelet q0= 100 # Maximum amplitude Ricker-Wavelet xscr = 150 # Source position (in grid points) in X yscr = 150 # Source position (in grid points) in Y # Receiver xrec1=150; yrec1=120; # Position Reciever 1 (in grid points) xrec2=150; yrec2=150; # Position Reciever 2 (in grid points) xrec3=150; yrec3=180;# Position Reciever 3 (in grid points) # Velocity and density modell_v = 3000*np.ones((ny,nx)) rho=2.2*np.ones((ny,nx)) """ Explanation: Input Parameter End of explanation """ # Init wavefields vx=np.zeros(shape = (ny,nx)) vy=np.zeros(shape = (ny,nx)) p=np.zeros(shape = (ny,nx)) vx_x=np.zeros(shape = (ny,nx)) vy_y=np.zeros(shape = (ny,nx)) p_x=np.zeros(shape = (ny,nx)) p_y=np.zeros(shape = (ny,nx)) # Calculate first Lame-Paramter l=rho * modell_v * modell_v cmin=min(modell_v.flatten()) # Lowest P-wave velocity cmax=max(modell_v.flatten()) # Highest P-wave velocity fmax=2*f0 # Maximum frequency dx=cmin/(fmax*c1) # Spatial discretization (in m) dy=dx # Spatial discretization (in m) dt=dx/(cmax)*c2 # Temporal discretization (in s) lampda_min=cmin/fmax # Smallest wavelength # Output model parameter: print("Model size: x:",dx*nx,"in m, y:",dy*ny,"in m") print("Temporal discretization: ",dt," s") print("Spatial discretization: ",dx," m") print("Number of gridpoints per minimum wavelength: 
",lampda_min/dx) """ Explanation: Preparation End of explanation """ x=np.arange(0,dx*nx,dx) # Space vector in X y=np.arange(0,dy*ny,dy) # Space vector in Y t=np.arange(0,T,dt) # Time vector nt=np.size(t) # Number of time steps # Plotting model fig, (ax1, ax2) = plt.subplots(1, 2) fig.subplots_adjust(wspace=0.4,right=1.6) ax1.plot(x,modell_v) ax1.set_ylabel('VP in m/s') ax1.set_xlabel('Depth in m') ax1.set_title('P-wave velocity') ax2.plot(x,rho) ax2.set_ylabel('Density in g/cm^3') ax2.set_xlabel('Depth in m') ax2.set_title('Density'); """ Explanation: Create space and time vector End of explanation """ tau=np.pi*f0*(t-1.5/f0) q=q0*(1.0-2.0*tau**2.0)*np.exp(-tau**2) # Plotting source signal plt.figure(3) plt.plot(t,q) plt.title('Source signal Ricker-Wavelet') plt.ylabel('Amplitude') plt.xlabel('Time in s') plt.draw() """ Explanation: Source signal - Ricker-wavelet End of explanation """ # Init Seismograms Seismogramm=np.zeros((3,nt)); # Three seismograms # Calculation of some coefficients i_dx=1.0/(dx) i_dy=1.0/(dy) c1=9.0/(8.0*dx) c2=1.0/(24.0*dx) c3=9.0/(8.0*dy) c4=1.0/(24.0*dy) c5=1.0/np.power(dx,3) c6=1.0/np.power(dy,3) c7=1.0/np.power(dx,2) c8=1.0/np.power(dy,2) c9=np.power(dt,3)/24.0 # Prepare slicing parameter: kxM2=slice(5-2,nx-4-2) kxM1=slice(5-1,nx-4-1) kx=slice(5,nx-4) kxP1=slice(5+1,nx-4+1) kxP2=slice(5+2,nx-4+2) kyM2=slice(5-2,ny-4-2) kyM1=slice(5-1,ny-4-1) ky=slice(5,ny-4) kyP1=slice(5+1,ny-4+1) kyP2=slice(5+2,ny-4+2) ## Time stepping print("Starting time stepping...") for n in range(2,nt): # Inject source wavelet p[yscr,xscr]=p[yscr,xscr]+q[n] # Update velocity p_x[ky,kx]=c1*(p[ky,kxP1]-p[ky,kx])-c2*(p[ky,kxP2]-p[ky,kxM1]) p_y[ky,kx]=c3*(p[kyP1,kx]-p[ky,kx])-c4*(p[kyP2,kx]-p[kyM1,kx]) vx=vx-dt/rho*p_x vy=vy-dt/rho*p_y # Update pressure vx_x[ky,kx]=c1*(vx[ky,kx]-vx[ky,kxM1])-c2*(vx[ky,kxP1]-vx[ky,kxM2]) vy_y[ky,kx]=c3*(vy[ky,kx]-vy[kyM1,kx])-c4*(vy[kyP1,kx]-vy[kyM2,kx]) p=p-l*dt*(vx_x+vy_y) # Save seismograms Seismogramm[0,n]=p[yrec1,xrec1] 
Seismogramm[1,n]=p[yrec2,xrec2] Seismogramm[2,n]=p[yrec3,xrec3] print("Finished time stepping!") """ Explanation: Time stepping End of explanation """ ## Save seismograms np.save("Seismograms/FD_2D_DX4_DT2_fast",Seismogramm) """ Explanation: Save seismograms End of explanation """ ## Image plot fig, ax = plt.subplots(1,1) img = ax.imshow(p); ax.set_title('P-Wavefield') ax.set_xticks(range(0,nx+1,int(nx/5))) ax.set_yticks(range(0,ny+1,int(ny/5))) ax.set_xlabel('Grid-points in X') ax.set_ylabel('Grid-points in Y') fig.colorbar(img) ## Plot seismograms fig, (ax1, ax2, ax3) = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.4,right=1.6, top = 2 ) ax1.plot(t,Seismogramm[0,:]) ax1.set_title('Seismogram 1') ax1.set_ylabel('Amplitude') ax1.set_xlabel('Time in s') ax1.set_xlim(0, T) ax2.plot(t,Seismogramm[1,:]) ax2.set_title('Seismogram 2') ax2.set_ylabel('Amplitude') ax2.set_xlabel('Time in s') ax2.set_xlim(0, T) ax3.plot(t,Seismogramm[2,:]) ax3.set_title('Seismogram 3') ax3.set_ylabel('Amplitude') ax3.set_xlabel('Time in s') ax3.set_xlim(0, T); """ Explanation: Plotting End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/nims-kma/cmip6/models/sandbox-1/toplevel.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'nims-kma', 'sandbox-1', 'toplevel') """ Explanation: ES-DOC CMIP6 Model Properties - Toplevel MIP Era: CMIP6 Institute: NIMS-KMA Source ID: SANDBOX-1 Sub-Topics: Radiative Forcings. Properties: 85 (42 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:28 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Flux Correction 3. Key Properties --&gt; Genealogy 4. Key Properties --&gt; Software Properties 5. Key Properties --&gt; Coupling 6. Key Properties --&gt; Tuning Applied 7. Key Properties --&gt; Conservation --&gt; Heat 8. Key Properties --&gt; Conservation --&gt; Fresh Water 9. Key Properties --&gt; Conservation --&gt; Salt 10. Key Properties --&gt; Conservation --&gt; Momentum 11. Radiative Forcings 12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2 13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4 14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O 15. 
Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3 16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3 17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC 18. Radiative Forcings --&gt; Aerosols --&gt; SO4 19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon 20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon 21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate 22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect 23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect 24. Radiative Forcings --&gt; Aerosols --&gt; Dust 25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic 26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic 27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt 28. Radiative Forcings --&gt; Other --&gt; Land Use 29. Radiative Forcings --&gt; Other --&gt; Solar 1. Key Properties Key properties of the model 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Top level overview of coupled model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of coupled model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Flux Correction Flux correction properties of the model 2.1. Details Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how flux corrections are applied in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Genealogy Genealogy and history of the model 3.1. Year Released Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Year the model was released End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.2. CMIP3 Parent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 CMIP3 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. CMIP5 Parent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 CMIP5 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.4. Previous Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Previously known as End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Software Properties Software properties of model 4.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.4. Components Structure Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how model realms are structured into independent software components (coupled via a coupler) and internal software components. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OASIS" # "OASIS3-MCT" # "ESMF" # "NUOPC" # "Bespoke" # "Unknown" # "None" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 4.5. Coupler Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Overarching coupling framework for model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Coupling ** 5.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of coupling in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.2. Atmosphere Double Flux Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Atmosphere grid" # "Ocean grid" # "Specific coupler grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 5.3. Atmosphere Fluxes Calculation Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Where are the air-sea fluxes calculated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.4. Atmosphere Relative Winds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Tuning Applied Tuning methodology for model 6.1. 
Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics/diagnostics of the global mean state used in tuning model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics/diagnostics used in tuning model/component (such as 20th century) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.5. Energy Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.6. Fresh Water Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how fresh water balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Conservation --&gt; Heat Global heat conservation properties of the model 7.1. Global Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how heat is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Atmos Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Atmos Land Interface Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how heat is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.4. Atmos Sea-ice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.5. Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.6. Land Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the land/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. 
Key Properties --&gt; Conservation --&gt; Fresh Water Global fresh water conservation properties of the model 8.1. Global Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how fresh water is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Atmos Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh water is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Atmos Land Interface Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how fresh water is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.4. Atmos Sea-ice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. 
Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh water is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.6. Runoff Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how runoff is distributed and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.7. Iceberg Calving Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how iceberg calving is modeled and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.8. Endoreic Basins Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how endoreic basins (no ocean access) are treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.9. Snow Accumulation Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how snow accumulation over land and over sea-ice is treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Key Properties --&gt; Conservation --&gt; Salt Global salt conservation properties of the model 9.1. Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how salt is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10. Key Properties --&gt; Conservation --&gt; Momentum Global momentum conservation properties of the model 10.1. Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how momentum is conserved in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11. Radiative Forcings Radiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5) 11.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of radiative forcings (GHG and aerosols) implementation in model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2 Carbon dioxide forcing 12.1. 
Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4 Methane forcing 13.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O Nitrous oxide forcing 14.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15. Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3 Troposheric ozone forcing 15.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3 Stratospheric ozone forcing 16.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC Ozone-depleting and non-ozone-depleting fluorinated gases forcing 17.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "Option 1" # "Option 2" # "Option 3" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.2. Equivalence Concentration Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Details of any equivalence concentrations used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Radiative Forcings --&gt; Aerosols --&gt; SO4 SO4 aerosol forcing 18.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon Black carbon aerosol forcing 19.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon Organic carbon aerosol forcing 20.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate Nitrate forcing 21.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 21.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect Cloud albedo effect forcing (RFaci) 22.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22.2. Aerosol Effect On Ice Clouds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect Cloud lifetime effect forcing (ERFaci) 23.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.2. 
Aerosol Effect On Ice Clouds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.3. RFaci From Sulfate Only Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative forcing from aerosol cloud interactions from sulfate aerosol only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24. Radiative Forcings --&gt; Aerosols --&gt; Dust Dust forcing 24.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 24.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic Tropospheric volcanic forcing 25.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 25.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic Stratospheric volcanic forcing 26.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt Sea salt forcing 27.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 28. Radiative Forcings --&gt; Other --&gt; Land Use Land use forcing 28.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 28.2. Crop Change Only Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Land use change represented via crop change only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "irradiance" # "proton" # "electron" # "cosmic ray" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 29. Radiative Forcings --&gt; Other --&gt; Solar Solar forcing 29.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How solar forcing is provided End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 29.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. 
citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """
rahulk90/vae_sparse
expt/TrainingVAEsparse.ipynb
mit
import sys,os,glob from collections import OrderedDict import numpy as np from utils.misc import readPickle, createIfAbsent sys.path.append('../') from optvaedatasets.load import loadDataset as loadDataset_OVAE from sklearn.feature_extraction.text import TfidfTransformer """ Explanation: VAEs on sparse data The following notebook provides an example of how to load a dataset, setup parameters for it, create the model and train it for a few epochs. In the notebook, we will use with the RCV1 dataset (assuming it has been setup previously). For details on how to set it up, run python rcv2.py in the optvaedatasets folder End of explanation """ default_params = readPickle('../optvaeutils/default_settings.pkl')[0] for k in default_params: print '(',k,default_params[k],')', print """ Explanation: Model Parameters The model parameters have been saved here, we'll load them and look at them These are what the model will be built based on End of explanation """ default_params['opt_type'] = 'finopt' #set to finopt to optimize var. params, none otherwise default_params['n_steps'] = 5 #temporary directory where checkpoints are saved default_params['savedir'] = './tmp' """ Explanation: For the moment, we will leave everything as is. Some worthwhile parameters to note: * n_steps: Number of steps of optimizing $\psi(x)$, the local variational parameters as output by the inference network. We'll set this to 10 below for the moment. * dim_stochastic: Number of latent dimensions. 
End of explanation """ dset = loadDataset_OVAE('rcv2') #Visualize structure of dataset dict for k in dset: print k, type(dset[k]), if hasattr(dset[k],'shape'): print dset[k].shape elif type(dset[k]) is not list: print dset[k] else: print #Add parameters to default_params for k in ['dim_observations','data_type']: default_params[k] = dset[k] default_params['max_word_count'] =dset['train'].max() #Create IDF additional_attrs = {} tfidf = TfidfTransformer(norm=None) tfidf.fit(dset['train']) additional_attrs['idf'] = tfidf.idf_ from optvaemodels.vae import VAE as Model import optvaemodels.vae_learn as Learn import optvaemodels.vae_evaluate as Evaluate """ Explanation: Load dataset Lets load the RCV1(v2) dataset and visualize how the dataset &lt;dict&gt; is structured We'll need to append some parameters from the dataset into the default parameters dict that we will use to create the model Also, compute the idf vectors for the entire dataset (the term frequencies will be multiplied dynamically) inside the model End of explanation """ default_params['savedir']+='-rcv2-'+default_params['opt_type'] createIfAbsent(default_params['savedir']) pfile= default_params['savedir']+'/'+default_params['unique_id']+'-config.pkl' print 'Training model from scratch. Parameters in: ',pfile model = Model(default_params, paramFile = pfile, additional_attrs = additional_attrs) """ Explanation: Setup Create directory for configuration files. The configuration file for a single experiment is in the pickle file. 
We will use this directory to save checkpoint files as well End of explanation """ savef = os.path.join(default_params['savedir'],default_params['unique_id']) #Prefix for saving in checkpoint directory savedata = Learn.learn( model, dataset = dset['train'], epoch_start = 0 , epoch_end = 3, #epochs -- set w/ default_params['epochs'] batch_size = default_params['batch_size'], #batch size savefreq = default_params['savefreq'], #frequency of saving savefile = savef, dataset_eval= dset['valid'] ) for k in savedata: print k, type(savedata[k]), savedata[k].shape """ Explanation: Training the model We can now train the model we created This is the overall setup for the file train.py End of explanation """
NorfolkDataSci/presentations
2017-10_class_imbalance/Class Imbalance.ipynb
mit
%matplotlib inline from sklearn import utils import matplotlib import warnings warnings.filterwarnings('ignore') import numpy as np import pandas as pd from imblearn.over_sampling import SMOTE import matplotlib.pyplot as plt plt.style.use('ggplot') from sklearn.linear_model import LogisticRegression #metrics to print from sklearn.linear_model import LogisticRegression from sklearn.cross_validation import train_test_split from sklearn import metrics from sklearn.metrics import confusion_matrix from sklearn.metrics import precision_score from sklearn.metrics import recall_score # percision recall curve from sklearn.metrics import precision_recall_curve from sklearn.metrics import average_precision_score %matplotlib inline np.random.seed(1) #to reproduce results #use Iris data and pick one flower to filter down # currently each has 50 col = ['sepal_length','sepal_width','petal_length','petal_width','type'] data = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', names = col) #pick a flower and select 10 out of the 50 observations virginica = data[data.type == 'Iris-virginica'].sample(frac=0.2).copy() not_virginica = data[data.type != 'Iris-virginica'] df = pd.concat([virginica,not_virginica]) #turn into binary df['virginica'] = np.where(df['type']=='Iris-virginica', 1, 0) df.drop('type',inplace=True, axis=1) print('Pct Minority: ' + str(round((df.virginica.sum()/df.virginica.count())*100,2)) + '%') print('Pct Majority: ' + str(round((1-df.virginica.sum()/df.virginica.count())*100,2)) + '%') def evaluation(y,y_prob,ztype): y_class = np.where(y_prob > .5,1,0) acc = metrics.accuracy_score(y, y_class) f1 = metrics.f1_score(y, y_class) pre = precision_score(y,y_class) rec = recall_score(y,y_class) print('Evaluation for ' + ztype) print('Accuracy : ', str(round(acc,4))) print('F1 : ', str(round(f1,4))) print('Precision: ', str(round(pre,4))) print('Recall : ', str(round(rec,4))) print() print(confusion_matrix(y, y_class)) """ 
Explanation: Welcome to Norfolk Data Science <img src="https://secure.meetupstatic.com/photos/event/c/f/b/a/highres_452453178.jpeg"> Why class imbalances ruin predictions and how to remedy 2017-10-03 Christopher Brossman What is the class imbalance problem? <img src="http://sci2s.ugr.es/sites/default/files/files/ComplementaryMaterial/imbalanced/yeast4_s1.0tr_mcg_vs_gvh.png"> Lets look at some real data End of explanation """ X_train, X_dev, y_train, y_dev = train_test_split(df.drop('virginica',axis=1), df.virginica, test_size=0.3,random_state=0) print("virginica in train set = ", str(y_train.sum())) print("virginica in dev set = ", str(y_dev.sum())) print() logistic = LogisticRegression() logistic.fit(X_train, y_train) #Get predicted classes y_train_pred = logistic.predict_proba(X_train)[:,1] y_dev_pred = logistic.predict_proba(X_dev)[:,1] evaluation(y_train,y_train_pred,'training set') print() evaluation(y_dev,y_dev_pred,'testing set') """ Explanation: Imbalance is Common, and Accuracy is NOT the right metric In prior example 9% of classes were "virginica" and 91% were "not virginica" If we predicted all classes were "not virginica" we would have 91% accuracy! Your classifier may be doing this! ... but the accuracy... it is like a paradox Can you collect more data? You might think it’s silly, but collecting more data is almost always overlooked. Can you collect more data? Take a second and think about whether you are able to gather more data on your problem. A larger dataset might expose a different and perhaps more balanced perspective on the classes. More examples of minor classes may be useful later when we look at resampling your dataset. Try Changing Your Performance Metric As mentioned prior - accuracy is a paradox and no longer the appropriate measurement. F1 Score (or F-score): A weighted average of precision and recall. - probably the single best measurement Precision: A measure of a classifiers exactness. 
Recall: A measure of a classifiers completeness Confusion Matrix -- always check the confusion matrix! Also check out these metrics Kappa (or Cohen’s kappa): Classification accuracy normalized by the imbalance of the classes in the data. ROC Curves: either traditional OR precision/recall ROC End of explanation """ #undersample virginica = df[df.virginica == 1].copy() not_virginica = df[df.virginica == 0 ].sample(frac=0.5).copy() df_undersample = pd.concat([virginica,not_virginica]) X_train, X_dev, y_train, y_dev = train_test_split(df_undersample.drop('virginica',axis=1), df_undersample.virginica, test_size=0.3,random_state=0) print("virginica in train set = ", str(y_train.sum())) print("virginica in dev set = ", str(y_dev.sum())) logistic = LogisticRegression() logistic.fit(X_train, y_train) #Get predicted classes y_train_pred = logistic.predict_proba(X_train)[:,1] y_dev_pred = logistic.predict_proba(X_dev)[:,1] evaluation(y_train,y_train_pred,'training set') print() evaluation(y_dev,y_dev_pred,'testing set') """ Explanation: Try Resampling Your Dataset Risk if undersample <img src="http://www.chioka.in/wp-content/uploads/2013/08/Undersampling.png"> Risk if oversample <img src="http://www.chioka.in/wp-content/uploads/2013/08/Oversampling.png"> Some Rules of Thumb Consider testing under-sampling when you have an a lot data (tens- or hundreds of thousands of instances or more) Consider testing over-sampling when you don’t have a lot of data (tens of thousands of records or less) Consider testing random and non-random (e.g. stratified) sampling schemes. Consider testing different resampled ratios (e.g. 
you don’t have to target a 1:1 ratio in a binary classification problem, try other ratios) End of explanation """ X_train, X_dev, y_train, y_dev = train_test_split(df.drop('virginica',axis=1), df.virginica, test_size=0.3,random_state=0) print("virginica in train set = ", str(y_train.sum())) print("virginica in dev set = ", str(y_dev.sum())) print() #smote sm = SMOTE(ratio=.5,k_neighbors =2,kind='regular',random_state=10); X_train, y_train = sm.fit_sample(X_train, np.ravel(y_train)) print("AFTER SMOTE: virginica in train set = ", str(y_train.sum())) print() logistic = LogisticRegression() logistic.fit(X_train, y_train) #Get predicted classes y_train_pred = logistic.predict_proba(X_train)[:,1] y_dev_pred = logistic.predict_proba(X_dev)[:,1] evaluation(y_train,y_train_pred,'training set') print() evaluation(y_dev,y_dev_pred,'testing set') """ Explanation: Try Generate Synthetic Samples Try SMOTE - how it works: <img src="http://www.chioka.in/wp-content/uploads/2013/08/SMOTE.png"> risk of SMOTE <img src="http://www.chioka.in/wp-content/uploads/2013/08/SMOTE-boundary.png"> End of explanation """ X_train, X_dev, y_train, y_dev = train_test_split(df.drop('virginica',axis=1), df.virginica, test_size=0.3,random_state=0) print("virginica in train set = ", str(y_train.sum())) print("virginica in dev set = ", str(y_dev.sum())) print() #logistic regression has class_weight - to penalize the cost function to be balanced logistic = LogisticRegression(class_weight='balanced') logistic.fit(X_train, y_train) #Get predicted classes y_train_pred = logistic.predict_proba(X_train)[:,1] y_dev_pred = logistic.predict_proba(X_dev)[:,1] evaluation(y_train,y_train_pred,'training set') print() evaluation(y_dev,y_dev_pred,'testing set') """ Explanation: Try Penalized Models You can build in a penalty proportional to the imbalance in the cost function In NN or other algorithms you can define explicitly In sklearn you can use the weighted function In this case it will be similar to getting one 
TP wrong is equal to getting ~9 TN wrong End of explanation """ #random forest from sklearn.ensemble import RandomForestClassifier rfc = RandomForestClassifier() rfc.fit(X_train, y_train) #Get predicted classes y_train_pred = rfc.predict_proba(X_train)[:,1] y_dev_pred = rfc.predict_proba(X_dev)[:,1] evaluation(y_train,y_train_pred,'training set') print() evaluation(y_dev,y_dev_pred,'testing set') """ Explanation: Try Different Algorithms Some algorithms are less suseptible to class imbalances such as tree based methods -- careful not to overfit End of explanation """
computational-class/computational-communication-2016
code/.ipynb_checkpoints/16&17 networkx-checkpoint.ipynb
mit
%matplotlib inline import networkx as nx import matplotlib.cm as cm import matplotlib.pyplot as plt import networkx as nx G=nx.Graph() # G = nx.DiGraph() # 有向网络 # 添加(孤立)节点 G.add_node("spam") # 添加节点和链接 G.add_edge(1,2) print(G.nodes()) print(G.edges()) # 绘制网络 nx.draw(G, with_labels = True) """ Explanation: 网络科学理论简介 网络科学:描述节点属性 王成军 wangchengjun@nju.edu.cn 计算传播网 http://computational-communication.com http://networkx.readthedocs.org/en/networkx-1.11/tutorial/ End of explanation """ G = nx.Graph() n = 0 with open ('/Users/chengjun/bigdata/www.dat.gz.txt') as f: for line in f: n += 1 if n % 10**4 == 0: flushPrint(n) x, y = line.rstrip().split(' ') G.add_edge(x,y) nx.info(G) """ Explanation: WWW Data download http://www3.nd.edu/~networks/resources.htm World-Wide-Web: [README] [DATA] Réka Albert, Hawoong Jeong and Albert-László Barabási: Diameter of the World Wide Web Nature 401, 130 (1999) [ PDF ] 作业: 下载www数据 构建networkx的网络对象g(提示:有向网络) 将www数据添加到g当中 计算网络中的节点数量和链接数量 End of explanation """ G = nx.karate_club_graph() clubs = [G.node[i]['club'] for i in G.nodes()] colors = [] for j in clubs: if j == 'Mr. Hi': colors.append('r') else: colors.append('g') nx.draw(G, with_labels = True, node_color = colors) G.node[1] # 节点1的属性 G.edge.keys()[:3] # 前三条边的id nx.info(G) G.nodes()[:10] G.edges()[:3] G.neighbors(1) nx.average_shortest_path_length(G) """ Explanation: 描述网络 nx.karate_club_graph 我们从karate_club_graph开始,探索网络的基本性质。 End of explanation """ nx.diameter(G)#返回图G的直径(最长最短路径的长度) """ Explanation: 网络直径 End of explanation """ nx.density(G) nodeNum = len(G.nodes()) edgeNum = len(G.edges()) 2.0*edgeNum/(nodeNum * (nodeNum - 1)) """ Explanation: 密度 End of explanation """ cc = nx.clustering(G) cc.items()[:5] plt.hist(cc.values(), bins = 15) plt.xlabel('$Clustering \, Coefficient, \, C$', fontsize = 20) plt.ylabel('$Frequency, \, F$', fontsize = 20) plt.show() """ Explanation: 作业: 计算www网络的网络密度 聚集系数 End of explanation """ # M. E. J. 
Newman, Mixing patterns in networks Physical Review E, 67 026126, 2003 nx.degree_assortativity_coefficient(G) #计算一个图的度匹配性。 Ge=nx.Graph() Ge.add_nodes_from([0,1],size=2) Ge.add_nodes_from([2,3],size=3) Ge.add_edges_from([(0,1),(2,3)]) print(nx.numeric_assortativity_coefficient(Ge,'size')) # plot degree correlation from collections import defaultdict import numpy as np l=defaultdict(list) g = nx.karate_club_graph() for i in g.nodes(): k = [] for j in g.neighbors(i): k.append(g.degree(j)) l[g.degree(i)].append(np.mean(k)) #l.append([g.degree(i),np.mean(k)]) x = l.keys() y = [np.mean(i) for i in l.values()] #x, y = np.array(l).T plt.plot(x, y, 'r-o', label = '$Karate\;Club$') plt.legend(loc=1,fontsize=10, numpoints=1) plt.xscale('log'); plt.yscale('log') plt.ylabel(r'$<knn(k)$> ', fontsize = 20) plt.xlabel('$k$', fontsize = 20) plt.show() """ Explanation: Spacing in Math Mode In a math environment, LaTeX ignores the spaces you type and puts in the spacing that it thinks is best. LaTeX formats mathematics the way it's done in mathematics texts. If you want different spacing, LaTeX provides the following four commands for use in math mode: \; - a thick space \: - a medium space \, - a thin space \! 
- a negative thin space 匹配系数 End of explanation """ dc = nx.degree_centrality(G) closeness = nx.closeness_centrality(G) betweenness= nx.betweenness_centrality(G) fig = plt.figure(figsize=(15, 4),facecolor='white') ax = plt.subplot(1, 3, 1) plt.hist(dc.values(), bins = 20) plt.xlabel('$Degree \, Centrality$', fontsize = 20) plt.ylabel('$Frequency, \, F$', fontsize = 20) ax = plt.subplot(1, 3, 2) plt.hist(closeness.values(), bins = 20) plt.xlabel('$Closeness \, Centrality$', fontsize = 20) ax = plt.subplot(1, 3, 3) plt.hist(betweenness.values(), bins = 20) plt.xlabel('$Betweenness \, Centrality$', fontsize = 20) plt.tight_layout() plt.show() fig = plt.figure(figsize=(15, 8),facecolor='white') for k in betweenness: plt.scatter(dc[k], closeness[k], s = betweenness[k]*1000) plt.text(dc[k], closeness[k]+0.02, str(k)) plt.xlabel('$Degree \, Centrality$', fontsize = 20) plt.ylabel('$Closeness \, Centrality$', fontsize = 20) plt.show() """ Explanation: Degree centrality measures.(度中心性) degree_centrality(G) # Compute the degree centrality for nodes. in_degree_centrality(G) # Compute the in-degree centrality for nodes. out_degree_centrality(G) # Compute the out-degree centrality for nodes. closeness_centrality(G[, v, weighted_edges]) # Compute closeness centrality for nodes. 
betweenness_centrality(G[, normalized, ...]) # Betweenness centrality measures.(介数中心性) End of explanation """ def plotDegreeDistribution(G): plt.plot(nx.degree_histogram(G) , 'ro', markersize = 10) #返回图中所有节点的度分布序列 plt.legend(['Degree']) plt.xlabel('$Degree$', fontsize = 20) plt.ylabel('$Number \, of \, nodes$', fontsize = 20) plt.title('$Degree\,Distribution$', fontsize = 20) plt.xscale('log') plt.yscale('log') plt.show() plotDegreeDistribution(G) """ Explanation: 度分布 End of explanation """ import networkx as nx import matplotlib.pyplot as plt RG = nx.random_graphs.random_regular_graph(3,200) #生成包含20个节点、每个节点有3个邻居的规则图RG pos = nx.spectral_layout(RG) #定义一个布局,此处采用了spectral布局方式,后变还会介绍其它布局方式,注意图形上的区别 nx.draw(RG,pos,with_labels=False,node_size = 30) #绘制规则图的图形,with_labels决定节点是非带标签(编号),node_size是节点的直径 plt.show() #显示图形 plotDegreeDistribution(RG) """ Explanation: 网络科学理论简介 网络科学:分析网络结构 王成军 wangchengjun@nju.edu.cn 计算传播网 http://computational-communication.com 规则网络 End of explanation """ import networkx as nx import matplotlib.pyplot as plt ER = nx.random_graphs.erdos_renyi_graph(200,0.05) #生成包含20个节点、以概率0.2连接的随机图 pos = nx.shell_layout(ER) #定义一个布局,此处采用了shell布局方式 nx.draw(ER,pos,with_labels=False,node_size = 30) plt.show() plotDegreeDistribution(ER) """ Explanation: ER随机网络 End of explanation """ import networkx as nx import matplotlib.pyplot as plt WS = nx.random_graphs.watts_strogatz_graph(200,4,0.3) #生成包含200个节点、每个节点4个近邻、随机化重连概率为0.3的小世界网络 pos = nx.circular_layout(WS) #定义一个布局,此处采用了circular布局方式 nx.draw(WS,pos,with_labels=False,node_size = 30) #绘制图形 plt.show() plotDegreeDistribution(WS) nx.diameter(WS) cc = nx.clustering(WS) plt.hist(cc.values(), bins = 10) plt.xlabel('$Clustering \, Coefficient, \, C$', fontsize = 20) plt.ylabel('$Frequency, \, F$', fontsize = 20) plt.show() import numpy as np np.mean(cc.values()) """ Explanation: 小世界网络 End of explanation """ import networkx as nx import matplotlib.pyplot as plt BA= nx.random_graphs.barabasi_albert_graph(200,2) #生成n=20、m=1的BA无标度网络 pos 
= nx.spring_layout(BA) #定义一个布局,此处采用了spring布局方式 nx.draw(BA,pos,with_labels=False,node_size = 30) #绘制图形 plt.show() plotDegreeDistribution(BA) """ Explanation: BA网络 End of explanation """ Ns = [i*10 for i in [1, 10, 100, 1000]] ds = [] for N in Ns: print N BA= nx.random_graphs.barabasi_albert_graph(N,2) d = nx.average_shortest_path_length(BA) ds.append(d) plt.plot(Ns, ds, 'r-o') plt.xlabel('$N$', fontsize = 20) plt.ylabel('$<d>$', fontsize = 20) plt.xscale('log') plt.show() """ Explanation: 作业: 阅读 Barabasi (1999) Internet Diameter of the world wide web.Nature.401 绘制www网络的出度分布、入度分布 使用BA模型生成节点数为N、幂指数为$\gamma$的网络 计算平均路径长度d与节点数量的关系 <img src = './img/diameter.png' width = 10000> End of explanation """ # subgraph G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc G.add_path([0,1,2,3]) H = G.subgraph([0,1,2]) G.edges(), H.edges() """ Explanation: More http://computational-communication.com/wiki/index.php?title=Networkx End of explanation """
GoogleCloudPlatform/vertex-ai-samples
notebooks/community/gapic/custom/showcase_custom_image_classification_online_pipeline.ipynb
apache-2.0
import os import sys # Google Cloud Notebook if os.path.exists("/opt/deeplearning/metadata/env_version"): USER_FLAG = "--user" else: USER_FLAG = "" ! pip3 install -U google-cloud-aiplatform $USER_FLAG """ Explanation: Vertex client library: Custom training image classification model with pipeline for online prediction with training pipeline <table align="left"> <td> <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_custom_image_classification_online_pipeline.ipynb"> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab </a> </td> <td> <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/custom/showcase_custom_image_classification_online_pipeline.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> </table> <br/><br/><br/> Overview This tutorial demonstrates how to use the Vertex client library for Python to train and deploy a custom image classification model for online prediction, using a training pipeline. Dataset The dataset used for this tutorial is the CIFAR10 dataset from TensorFlow Datasets. The version of the dataset you will use is built into TensorFlow. The trained model predicts which type of class an image is from ten classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck. Objective In this tutorial, you create a custom model from a Python script in a Google prebuilt Docker container using the Vertex client library, and then do a prediction on the deployed model by sending data. You can alternatively create custom models using gcloud command-line tool or online using Google Cloud Console. The steps performed include: Create a Vertex custom job for training a model. Create a TrainingPipeline resource. Train a TensorFlow model with the TrainingPipeline resource. 
Retrieve and load the model artifacts. View the model evaluation. Upload the model as a Vertex Model resource. Deploy the Model resource to a serving Endpoint resource. Make a prediction. Undeploy the Model resource. Costs This tutorial uses billable components of Google Cloud (GCP): Vertex AI Cloud Storage Learn about Vertex AI pricing and Cloud Storage pricing, and use the Pricing Calculator to generate a cost estimate based on your projected usage. Installation Install the latest version of Vertex client library. End of explanation """ ! pip3 install -U google-cloud-storage $USER_FLAG """ Explanation: Install the latest GA version of google-cloud-storage library as well. End of explanation """ if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) """ Explanation: Restart the kernel Once you've installed the Vertex client library and Google cloud-storage, you need to restart the notebook kernel so it can find the packages. End of explanation """ PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID """ Explanation: Before you begin GPU runtime Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select Runtime > Change Runtime Type > GPU Set up your Google Cloud project The following steps are required, regardless of your notebook environment. Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs. Make sure that billing is enabled for your project. Enable the Vertex APIs and Compute Engine APIs. 
The Google Cloud SDK is already installed in Google Cloud Notebook. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands. End of explanation """ REGION = "us-central1" # @param {type: "string"} """ Explanation: Region You can also change the REGION variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you. Americas: us-central1 Europe: europe-west4 Asia Pacific: asia-east1 You may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the Vertex locations documentation End of explanation """ from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") """ Explanation: Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial. End of explanation """ # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. # If on Google Cloud Notebook, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. 
elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' """ Explanation: Authenticate your Google Cloud account If you are using Google Cloud Notebook, your environment is already authenticated. Skip this step. If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. Otherwise, follow these steps: In the Cloud Console, go to the Create service account key page. Click Create service account. In the Service account name field, enter a name, and click Create. In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex" into the filter box, and select Vertex Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin. Click Create. A JSON file that contains your key downloads to your local environment. Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell. End of explanation """ BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP """ Explanation: Create a Cloud Storage bucket The following steps are required, regardless of your notebook environment. When you submit a custom training job using the Vertex client library, you upload a Python package containing your training code to a Cloud Storage bucket. Vertex runs the code from this package. In this tutorial, Vertex also saves the trained model that results from your job in the same bucket. You can then create an Endpoint resource based on this output in order to serve online predictions. Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization. End of explanation """ ! 
gsutil mb -l $REGION $BUCKET_NAME """ Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket. End of explanation """ ! gsutil ls -al $BUCKET_NAME """ Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents: End of explanation """ import time from google.cloud.aiplatform import gapic as aip from google.protobuf import json_format from google.protobuf.struct_pb2 import Value """ Explanation: Set up variables Next, set up some variables used throughout the tutorial. Import libraries and define constants Import Vertex client library Import the Vertex client library into our Python environment. End of explanation """ # API service endpoint API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION) # Vertex location root path for your dataset, model and endpoint resources PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION """ Explanation: Vertex constants Setup up the following constants for Vertex: API_ENDPOINT: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services. PARENT: The Vertex location root path for dataset, model, job, pipeline and endpoint resources. End of explanation """ CUSTOM_TASK_GCS_PATH = ( "gs://google-cloud-aiplatform/schema/trainingjob/definition/custom_task_1.0.0.yaml" ) """ Explanation: CustomJob constants Set constants unique to CustomJob training: Dataset Training Schemas: Tells the Pipeline resource service the task (e.g., classification) to train the model for. 
End of explanation """ if os.getenv("IS_TESTING_TRAIN_GPU"): TRAIN_GPU, TRAIN_NGPU = ( aip.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_TRAIN_GPU")), ) else: TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1) if os.getenv("IS_TESTING_DEPOLY_GPU"): DEPLOY_GPU, DEPLOY_NGPU = ( aip.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_DEPOLY_GPU")), ) else: DEPLOY_GPU, DEPLOY_NGPU = (None, None) """ Explanation: Hardware Accelerators Set the hardware accelerators (e.g., GPU), if any, for training and prediction. Set the variables TRAIN_GPU/TRAIN_NGPU and DEPLOY_GPU/DEPLOY_NGPU to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4) For GPU, available accelerators include: - aip.AcceleratorType.NVIDIA_TESLA_K80 - aip.AcceleratorType.NVIDIA_TESLA_P100 - aip.AcceleratorType.NVIDIA_TESLA_P4 - aip.AcceleratorType.NVIDIA_TESLA_T4 - aip.AcceleratorType.NVIDIA_TESLA_V100 Otherwise specify (None, None) to use a container image to run on a CPU. Note: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3 -- which is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support. 
End of explanation """ if os.getenv("IS_TESTING_TF"): TF = os.getenv("IS_TESTING_TF") else: TF = "2-1" if TF[0] == "2": if TRAIN_GPU: TRAIN_VERSION = "tf-gpu.{}".format(TF) else: TRAIN_VERSION = "tf-cpu.{}".format(TF) if DEPLOY_GPU: DEPLOY_VERSION = "tf2-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf2-cpu.{}".format(TF) else: if TRAIN_GPU: TRAIN_VERSION = "tf-gpu.{}".format(TF) else: TRAIN_VERSION = "tf-cpu.{}".format(TF) if DEPLOY_GPU: DEPLOY_VERSION = "tf-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf-cpu.{}".format(TF) TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION) DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION) print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU) print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU) """ Explanation: Container (Docker) image Next, we will set the Docker container images for training and prediction TensorFlow 1.15 gcr.io/cloud-aiplatform/training/tf-cpu.1-15:latest gcr.io/cloud-aiplatform/training/tf-gpu.1-15:latest TensorFlow 2.1 gcr.io/cloud-aiplatform/training/tf-cpu.2-1:latest gcr.io/cloud-aiplatform/training/tf-gpu.2-1:latest TensorFlow 2.2 gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest gcr.io/cloud-aiplatform/training/tf-gpu.2-2:latest TensorFlow 2.3 gcr.io/cloud-aiplatform/training/tf-cpu.2-3:latest gcr.io/cloud-aiplatform/training/tf-gpu.2-3:latest TensorFlow 2.4 gcr.io/cloud-aiplatform/training/tf-cpu.2-4:latest gcr.io/cloud-aiplatform/training/tf-gpu.2-4:latest XGBoost gcr.io/cloud-aiplatform/training/xgboost-cpu.1-1 Scikit-learn gcr.io/cloud-aiplatform/training/scikit-learn-cpu.0-23:latest Pytorch gcr.io/cloud-aiplatform/training/pytorch-cpu.1-4:latest gcr.io/cloud-aiplatform/training/pytorch-cpu.1-5:latest gcr.io/cloud-aiplatform/training/pytorch-cpu.1-6:latest gcr.io/cloud-aiplatform/training/pytorch-cpu.1-7:latest For the latest list, see Pre-built containers for training. 
TensorFlow 1.15 gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest TensorFlow 2.1 gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-1:latest TensorFlow 2.2 gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-2:latest TensorFlow 2.3 gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-3:latest gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-3:latest XGBoost gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-2:latest gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-1:latest gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-90:latest gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-82:latest Scikit-learn gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-23:latest gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-22:latest gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-20:latest For the latest list, see Pre-built containers for prediction End of explanation """ if os.getenv("IS_TESTING_TRAIN_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Train machine type", TRAIN_COMPUTE) if os.getenv("IS_TESTING_DEPLOY_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Deploy machine type", DEPLOY_COMPUTE) """ Explanation: Machine Type Next, set the machine type to use for training and prediction. Set the variables TRAIN_COMPUTE and DEPLOY_COMPUTE to configure the compute resources for the VMs you will use for for training and prediction. machine type n1-standard: 3.75GB of memory per vCPU. 
n1-highmem: 6.5GB of memory per vCPU n1-highcpu: 0.9 GB of memory per vCPU vCPUs: number of [2, 4, 8, 16, 32, 64, 96 ] Note: The following is not supported for training: standard: 2 vCPUs highcpu: 2, 4 and 8 vCPUs Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs. End of explanation """ # client options same for all services client_options = {"api_endpoint": API_ENDPOINT} def create_model_client(): client = aip.ModelServiceClient(client_options=client_options) return client def create_pipeline_client(): client = aip.PipelineServiceClient(client_options=client_options) return client def create_endpoint_client(): client = aip.EndpointServiceClient(client_options=client_options) return client def create_prediction_client(): client = aip.PredictionServiceClient(client_options=client_options) return client clients = {} clients["model"] = create_model_client() clients["pipeline"] = create_pipeline_client() clients["endpoint"] = create_endpoint_client() clients["prediction"] = create_prediction_client() for client in clients.items(): print(client) """ Explanation: Tutorial Now you are ready to start creating your own custom model and training for CIFAR10. Set up clients The Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server. You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront. Model Service for Model resources. Pipeline Service for training. Endpoint Service for deployment. Job Service for batch jobs and custom training. Prediction Service for serving. 
End of explanation """ if TRAIN_GPU: machine_spec = { "machine_type": TRAIN_COMPUTE, "accelerator_type": TRAIN_GPU, "accelerator_count": TRAIN_NGPU, } else: machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0} """ Explanation: Train a model There are two ways you can train a custom model using a container image: Use a Google Cloud prebuilt container. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model. Use your own custom container image. If you use your own container, the container needs to contain your code for training a custom model. Prepare your custom job specification Now that your clients are ready, your first step is to create a Job Specification for your custom training job. The job specification will consist of the following: worker_pool_spec : The specification of the type of machine(s) you will use for training and how many (single or distributed) python_package_spec : The specification of the Python package to be installed with the pre-built container. Prepare your machine specification Now define the machine specification for your custom training job. This tells Vertex what type of machine instance to provision for the training. - machine_type: The type of GCP instance to provision -- e.g., n1-standard-8. - accelerator_type: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable TRAIN_GPU != None, you are using a GPU; otherwise you will use a CPU. - accelerator_count: The number of accelerators. End of explanation """ DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard] DISK_SIZE = 200 # GB disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE} """ Explanation: Prepare your disk specification (optional) Now define the disk specification for your custom training job. 
This tells Vertex what type and size of disk to provision in each machine instance for the training. boot_disk_type: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD. boot_disk_size_gb: Size of disk in GB. End of explanation """ JOB_NAME = "custom_job_" + TIMESTAMP MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME) if not TRAIN_NGPU or TRAIN_NGPU < 2: TRAIN_STRATEGY = "single" else: TRAIN_STRATEGY = "mirror" EPOCHS = 20 STEPS = 100 DIRECT = True if DIRECT: CMDARGS = [ "--model-dir=" + MODEL_DIR, "--epochs=" + str(EPOCHS), "--steps=" + str(STEPS), "--distribute=" + TRAIN_STRATEGY, ] else: CMDARGS = [ "--epochs=" + str(EPOCHS), "--steps=" + str(STEPS), "--distribute=" + TRAIN_STRATEGY, ] worker_pool_spec = [ { "replica_count": 1, "machine_spec": machine_spec, "disk_spec": disk_spec, "python_package_spec": { "executor_image_uri": TRAIN_IMAGE, "package_uris": [BUCKET_NAME + "/trainer_cifar10.tar.gz"], "python_module": "trainer.task", "args": CMDARGS, }, } ] """ Explanation: Define the worker pool specification Next, you define the worker pool specification for your custom training job. The worker pool specification will consist of the following: replica_count: The number of instances to provision of this machine type. machine_spec: The hardware specification. disk_spec : (optional) The disk storage specification. python_package: The Python training package to install on the VM instance(s) and which Python module to invoke, along with command line arguments for the Python module. Let's dive deeper now into the python package specification: -executor_image_spec: This is the docker image which is configured for your custom training job. -package_uris: This is a list of the locations (URIs) of your python training packages to install on the provisioned instance. The locations need to be in a Cloud Storage bucket. These can be either individual python files or a zip (archive) of an entire package. 
In the later case, the job service will unzip (unarchive) the contents into the docker image. -python_module: The Python module (script) to invoke for running the custom training job. In this example, you will be invoking trainer.task.py -- note that it was not neccessary to append the .py suffix. -args: The command line arguments to pass to the corresponding Pythom module. In this example, you will be setting: - "--model-dir=" + MODEL_DIR : The Cloud Storage location where to store the model artifacts. There are two ways to tell the training script where to save the model artifacts: - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable DIRECT = True), or - indirect: The service passes the Cloud Storage location as the environment variable AIP_MODEL_DIR to your training script (set variable DIRECT = False). In this case, you tell the service the model artifact location in the job specification. - "--epochs=" + EPOCHS: The number of epochs for training. - "--steps=" + STEPS: The number of steps (batches) per epoch. - "--distribute=" + TRAIN_STRATEGY" : The training distribution strategy to use for single or distributed training. - "single": single device. - "mirror": all GPU devices on a single compute instance. - "multi": all GPU devices on all compute instances. End of explanation """ # Make folder for Python training script ! rm -rf custom ! mkdir custom # Add package information ! touch custom/README.md setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0" ! echo "$setup_cfg" > custom/setup.cfg setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())" ! 
echo "$setup_py" > custom/setup.py pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex" ! echo "$pkg_info" > custom/PKG-INFO # Make the training subfolder ! mkdir custom/trainer ! touch custom/trainer/__init__.py """ Explanation: Examine the training package Package layout Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout. PKG-INFO README.md setup.cfg setup.py trainer __init__.py task.py The files setup.cfg and setup.py are the instructions for installing the package into the operating environment of the Docker image. The file trainer/task.py is the Python script for executing the custom training job. Note, when we referred to it in the worker pool specification, we replace the directory slash with a dot (trainer.task) and dropped the file suffix (.py). Package Assembly In the following cells, you will assemble the training package. 
End of explanation """ %%writefile custom/trainer/task.py # Single, Mirror and Multi-Machine Distributed Training for CIFAR-10 import tensorflow_datasets as tfds import tensorflow as tf from tensorflow.python.client import device_lib import argparse import os import sys tfds.disable_progress_bar() parser = argparse.ArgumentParser() parser.add_argument('--model-dir', dest='model_dir', default=os.getenv("AIP_MODEL_DIR"), type=str, help='Model dir.') parser.add_argument('--lr', dest='lr', default=0.01, type=float, help='Learning rate.') parser.add_argument('--epochs', dest='epochs', default=10, type=int, help='Number of epochs.') parser.add_argument('--steps', dest='steps', default=200, type=int, help='Number of steps per epoch.') parser.add_argument('--distribute', dest='distribute', type=str, default='single', help='distributed training strategy') args = parser.parse_args() print('Python Version = {}'.format(sys.version)) print('TensorFlow Version = {}'.format(tf.__version__)) print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found'))) print('DEVICES', device_lib.list_local_devices()) # Single Machine, single compute device if args.distribute == 'single': if tf.test.is_gpu_available(): strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0") else: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") # Single Machine, multiple compute device elif args.distribute == 'mirror': strategy = tf.distribute.MirroredStrategy() # Multiple Machine, multiple compute device elif args.distribute == 'multi': strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() # Multi-worker configuration print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync)) # Preparing dataset BUFFER_SIZE = 10000 BATCH_SIZE = 64 def make_datasets_unbatched(): # Scaling CIFAR10 data from (0, 255] to (0., 1.] 
def scale(image, label): image = tf.cast(image, tf.float32) image /= 255.0 return image, label datasets, info = tfds.load(name='cifar10', with_info=True, as_supervised=True) return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE).repeat() # Build the Keras model def build_and_compile_cnn_model(): model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(10, activation='softmax') ]) model.compile( loss=tf.keras.losses.sparse_categorical_crossentropy, optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr), metrics=['accuracy']) return model # Train the model NUM_WORKERS = strategy.num_replicas_in_sync # Here the batch size scales up by number of workers since # `tf.data.Dataset.batch` expects the global batch size. GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE) with strategy.scope(): # Creation of dataset, and model building/compiling need to be within # `strategy.scope()`. model = build_and_compile_cnn_model() model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps) model.save(args.model_dir) """ Explanation: Task.py contents In the next cell, you write the contents of the training script task.py. We won't go into detail, it's just there for you to browse. In summary: Get the directory where to save the model artifacts from the command line (--model_dir), and if not specified, then from the environment variable AIP_MODEL_DIR. Loads CIFAR10 dataset from TF Datasets (tfds). Builds a model using TF.Keras model API. Compiles the model (compile()). Sets a training distribution strategy according to the argument args.distribute. 
Trains the model (fit()) with epochs and steps according to the arguments args.epochs and args.steps Saves the trained model (save(args.model_dir)) to the specified model directory. End of explanation """ ! rm -f custom.tar custom.tar.gz ! tar cvf custom.tar custom ! gzip custom.tar ! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_cifar10.tar.gz """ Explanation: Store training script on your Cloud Storage bucket Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket. End of explanation """ MODEL_NAME = "custom_pipeline-" + TIMESTAMP PIPELINE_DISPLAY_NAME = "custom-training-pipeline" + TIMESTAMP training_task_inputs = json_format.ParseDict( {"workerPoolSpecs": worker_pool_spec}, Value() ) pipeline = { "display_name": PIPELINE_DISPLAY_NAME, "training_task_definition": CUSTOM_TASK_GCS_PATH, "training_task_inputs": training_task_inputs, "model_to_upload": { "display_name": PIPELINE_DISPLAY_NAME + "-model", "artifact_uri": MODEL_DIR, "container_spec": {"image_uri": DEPLOY_IMAGE}, }, } print(pipeline) """ Explanation: Train the model using a TrainingPipeline resource Now start training of your custom training job using a training pipeline on Vertex. To train the your custom model, do the following steps: Create a Vertex TrainingPipeline resource for the Dataset resource. Execute the pipeline to start the training. Create a TrainingPipeline resource You may ask, what do we use a pipeline for? We typically use pipelines when the job (such as training) has multiple steps, generally in sequential order: do step A, do step B, etc. By putting the steps into a pipeline, we gain the benefits of: Being reusable for subsequent training jobs. Can be containerized and ran as a batch job. Can be distributed. All the steps are associated with the same pipeline job for tracking progress. The training_pipeline specification First, you need to describe a pipeline specification. 
Let's look into the minimal requirements for constructing a training_pipeline specification for a custom job: display_name: A human readable name for the pipeline job. training_task_definition: The training task schema. training_task_inputs: A dictionary describing the requirements for the training job. model_to_upload: A dictionary describing the specification for the (uploaded) Vertex custom Model resource. display_name: A human readable name for the Model resource. artificat_uri: The Cloud Storage path where the model artifacts are stored in SavedModel format. container_spec: This is the specification for the Docker container that will be installed on the Endpoint resource, from which the custom model will serve predictions. End of explanation """ def create_pipeline(training_pipeline): try: pipeline = clients["pipeline"].create_training_pipeline( parent=PARENT, training_pipeline=training_pipeline ) print(pipeline) except Exception as e: print("exception:", e) return None return pipeline response = create_pipeline(pipeline) """ Explanation: Create the training pipeline Use this helper function create_pipeline, which takes the following parameter: training_pipeline: the full specification for the pipeline training job. The helper function calls the pipeline client service's create_pipeline method, which takes the following parameters: parent: The Vertex location root path for your Dataset, Model and Endpoint resources. training_pipeline: The full specification for the pipeline training job. The helper function will return the Vertex fully qualified identifier assigned to the training pipeline, which is saved as pipeline.name. End of explanation """ # The full unique ID for the pipeline pipeline_id = response.name # The short numeric ID for the pipeline pipeline_short_id = pipeline_id.split("/")[-1] print(pipeline_id) """ Explanation: Now save the unique identifier of the training pipeline you created. 
End of explanation """ def get_training_pipeline(name, silent=False): response = clients["pipeline"].get_training_pipeline(name=name) if silent: return response print("pipeline") print(" name:", response.name) print(" display_name:", response.display_name) print(" state:", response.state) print(" training_task_definition:", response.training_task_definition) print(" training_task_inputs:", dict(response.training_task_inputs)) print(" create_time:", response.create_time) print(" start_time:", response.start_time) print(" end_time:", response.end_time) print(" update_time:", response.update_time) print(" labels:", dict(response.labels)) return response response = get_training_pipeline(pipeline_id) """ Explanation: Get information on a training pipeline Now get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the the job client service's get_training_pipeline method, with the following parameter: name: The Vertex fully qualified pipeline identifier. When the model is done training, the pipeline state will be PIPELINE_STATE_SUCCEEDED. End of explanation """ while True: response = get_training_pipeline(pipeline_id, True) if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED: print("Training job has not completed:", response.state) model_to_deploy_id = None if response.state == aip.PipelineState.PIPELINE_STATE_FAILED: raise Exception("Training Job Failed") else: model_to_deploy = response.model_to_upload model_to_deploy_id = model_to_deploy.name print("Training Time:", response.end_time - response.start_time) break time.sleep(60) print("model to deploy:", model_to_deploy_id) if not DIRECT: MODEL_DIR = MODEL_DIR + "/model" model_path_to_deploy = MODEL_DIR """ Explanation: Deployment Training the above model may take upwards of 20 minutes time. 
Once your model is done training, you can calculate the actual time it took to train the model by subtracting end_time from start_time. For your model, you will need to know the fully qualified Vertex Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field model_to_deploy.name. End of explanation """ import tensorflow as tf model = tf.keras.models.load_model(MODEL_DIR) """ Explanation: Load the saved model Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction. To load, you use the TF.Keras model.load_model() method passing it the Cloud Storage path where the model is saved -- specified by MODEL_DIR. End of explanation """ import numpy as np from tensorflow.keras.datasets import cifar10 (_, _), (x_test, y_test) = cifar10.load_data() x_test = (x_test / 255.0).astype(np.float32) print(x_test.shape, y_test.shape) """ Explanation: Evaluate the model Now find out how good the model is. Load evaluation data You will load the CIFAR10 test (holdout) data from tf.keras.datasets, using the method load_data(). This will return the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the image data, and the corresponding labels. You don't need the training data, and hence why we loaded it as (_, _). Before you can run the data through evaluation, you need to preprocess it: x_test: 1. Normalize (rescaling) the pixel data by dividing each pixel by 255. This will replace each single byte integer pixel with a 32-bit floating point number between 0 and 1. y_test:<br/> 2. The labels are currently scalar (sparse). If you look back at the compile() step in the trainer/task.py script, you will find that it was compiled for sparse labels. 
So we don't need to do anything more. End of explanation """ model.evaluate(x_test, y_test) """ Explanation: Perform the model evaluation Now evaluate how well the model in the custom job did. End of explanation """ CONCRETE_INPUT = "numpy_inputs" def _preprocess(bytes_input): decoded = tf.io.decode_jpeg(bytes_input, channels=3) decoded = tf.image.convert_image_dtype(decoded, tf.float32) resized = tf.image.resize(decoded, size=(32, 32)) return resized @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def preprocess_fn(bytes_inputs): decoded_images = tf.map_fn( _preprocess, bytes_inputs, dtype=tf.float32, back_prop=False ) return { CONCRETE_INPUT: decoded_images } # User needs to make sure the key matches model's input @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def serving_fn(bytes_inputs): images = preprocess_fn(bytes_inputs) prob = m_call(**images) return prob m_call = tf.function(model.call).get_concrete_function( [tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)] ) tf.saved_model.save( model, model_path_to_deploy, signatures={"serving_default": serving_fn} ) """ Explanation: Upload the model for serving Next, you will upload your TF.Keras model from the custom job to Vertex Model service, which will create a Vertex Model resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex, your serving function ensures that the data is decoded on the model server before it is passed as input to your model. How does the serving function work When you send a request to an online prediction server, the request is received by a HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. 
For Google pre-built prediction containers, the request content is passed to the serving function as a tf.string. The serving function consists of two parts: preprocessing function: Converts the input (tf.string) to the input shape and data type of the underlying model (dynamic graph). Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc. post-processing function: Converts the model output to format expected by the receiving application -- e.q., compresses the output. Packages the output for the the receiving application -- e.g., add headings, make JSON object, etc. Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content. One consideration you need to consider when building serving functions for TF.Keras models is that they run as static graphs. That means, you cannot use TF graph operations that require a dynamic graph. If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported. Serving function for image data To pass images to the prediction service, you encode the compressed (e.g., JPEG) image bytes into base 64 -- which makes the content safe from modification while transmitting binary data over the network. Since this deployed model expects input data as raw (uncompressed) bytes, you need to ensure that the base 64 encoded data gets converted back to raw bytes before it is passed as input to the deployed model. To resolve this, define a serving function (serving_fn) and attach it to the model as a preprocessing step. 
Add a @tf.function decorator so the serving function is fused to the underlying model (instead of upstream on a CPU). When you send a prediction or explanation request, the content of the request is base 64 decoded into a Tensorflow string (tf.string), which is passed to the serving function (serving_fn). The serving function preprocesses the tf.string into raw (uncompressed) numpy bytes (preprocess_fn) to match the input requirements of the model: - io.decode_jpeg- Decompresses the JPG image which is returned as a Tensorflow tensor with three channels (RGB). - image.convert_image_dtype - Changes integer pixel values to float 32, and rescales pixel data between 0 and 1. - image.resize - Resizes the image to match the input shape for the model. At this point, the data can be passed to the model (m_call). End of explanation """ loaded = tf.saved_model.load(model_path_to_deploy) serving_input = list( loaded.signatures["serving_default"].structured_input_signature[1].keys() )[0] print("Serving function input:", serving_input) """ Explanation: Get the serving function signature You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer. For your purpose, you need the signature of the serving function. Why? Well, when we send our data for prediction as a HTTP request packet, the image data is base64 encoded, and our TF.Keras model takes numpy input. Your serving function will do the conversion from base64 to a numpy array. When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request. 
End of explanation """ IMAGE_URI = DEPLOY_IMAGE def upload_model(display_name, image_uri, model_uri): model = { "display_name": display_name, "metadata_schema_uri": "", "artifact_uri": model_uri, "container_spec": { "image_uri": image_uri, "command": [], "args": [], "env": [{"name": "env_name", "value": "env_value"}], "ports": [{"container_port": 8080}], "predict_route": "", "health_route": "", }, } response = clients["model"].upload_model(parent=PARENT, model=model) print("Long running operation:", response.operation.name) upload_model_response = response.result(timeout=180) print("upload_model_response") print(" model:", upload_model_response.model) return upload_model_response.model model_to_deploy_id = upload_model( "cifar10-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy ) """ Explanation: Upload the model Use this helper function upload_model to upload your model, stored in SavedModel format, up to the Model service, which will instantiate a Vertex Model resource instance for your model. Once you've done that, you can use the Model resource instance in the same way as any other Vertex Model resource instance, such as deploying to an Endpoint resource for serving predictions. The helper function takes the following parameters: display_name: A human readable name for the Endpoint service. image_uri: The container image for the model deployment. model_uri: The Cloud Storage path to our SavedModel artificat. For this tutorial, this is the Cloud Storage location where the trainer/task.py saved the model artifacts, which we specified in the variable MODEL_DIR. The helper function calls the Model client service's method upload_model, which takes the following parameters: parent: The Vertex location root path for Dataset, Model and Endpoint resources. model: The specification for the Vertex Model resource instance. Let's now dive deeper into the Vertex model specification model. 
This is a dictionary object that consists of the following fields: display_name: A human readable name for the Model resource. metadata_schema_uri: Since your model was built without an Vertex Dataset resource, you will leave this blank (''). artificat_uri: The Cloud Storage path where the model is stored in SavedModel format. container_spec: This is the specification for the Docker container that will be installed on the Endpoint resource, from which the Model resource will serve predictions. Use the variable you set earlier DEPLOY_GPU != None to use a GPU; otherwise only a CPU is allocated. Uploading a model into a Vertex Model resource returns a long running operation, since it may take a few moments. You call response.result(), which is a synchronous call and will return when the Vertex Model resource is ready. The helper function returns the Vertex fully qualified identifier for the corresponding Vertex Model instance upload_model_response.model. You will save the identifier for subsequent steps in the variable model_to_deploy_id. End of explanation """ def get_model(name): response = clients["model"].get_model(name=name) print(response) get_model(model_to_deploy_id) """ Explanation: Get Model resource information Now let's get the model information for just your model. Use this helper function get_model, with the following parameter: name: The Vertex unique identifier for the Model resource. This helper function calls the Vertex Model client service's method get_model, with the following parameter: name: The Vertex unique identifier for the Model resource. 
End of explanation """ ENDPOINT_NAME = "cifar10_endpoint-" + TIMESTAMP def create_endpoint(display_name): endpoint = {"display_name": display_name} response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint) print("Long running operation:", response.operation.name) result = response.result(timeout=300) print("result") print(" name:", result.name) print(" display_name:", result.display_name) print(" description:", result.description) print(" labels:", result.labels) print(" create_time:", result.create_time) print(" update_time:", result.update_time) return result result = create_endpoint(ENDPOINT_NAME) """ Explanation: Deploy the Model resource Now deploy the trained Vertex custom Model resource. This requires two steps: Create an Endpoint resource for deploying the Model resource to. Deploy the Model resource to the Endpoint resource. Create an Endpoint resource Use this helper function create_endpoint to create an endpoint to deploy the model to for serving predictions, with the following parameter: display_name: A human readable name for the Endpoint resource. The helper function uses the endpoint client service's create_endpoint method, which takes the following parameter: display_name: A human readable name for the Endpoint resource. Creating an Endpoint resource returns a long running operation, since it may take a few moments to provision the Endpoint resource for serving. You call response.result(), which is a synchronous call and will return when the Endpoint resource is ready. The helper function returns the Vertex fully qualified identifier for the Endpoint resource: response.name. End of explanation """ # The full unique ID for the endpoint endpoint_id = result.name # The short numeric ID for the endpoint endpoint_short_id = endpoint_id.split("/")[-1] print(endpoint_id) """ Explanation: Now get the unique identifier for the Endpoint resource you created. 
End of explanation """ MIN_NODES = 1 MAX_NODES = 1 """ Explanation: Compute instance scaling You have several choices on scaling the compute instances for handling your online prediction requests: Single Instance: The online prediction requests are processed on a single compute instance. Set the minimum (MIN_NODES) and maximum (MAX_NODES) number of compute instances to one. Manual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specified. Set the minimum (MIN_NODES) and maximum (MAX_NODES) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them. Auto Scaling: The online prediction requests are split across a scaleable number of compute instances. Set the minimum (MIN_NODES) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES) number of compute instances to provision, depending on load conditions. The minimum number of compute instances corresponds to the field min_replica_count and the maximum number of compute instances corresponds to the field max_replica_count, in your subsequent deployment request. 
End of explanation """ DEPLOYED_NAME = "cifar10_deployed-" + TIMESTAMP def deploy_model( model, deployed_model_display_name, endpoint, traffic_split={"0": 100} ): if DEPLOY_GPU: machine_spec = { "machine_type": DEPLOY_COMPUTE, "accelerator_type": DEPLOY_GPU, "accelerator_count": DEPLOY_NGPU, } else: machine_spec = { "machine_type": DEPLOY_COMPUTE, "accelerator_count": 0, } deployed_model = { "model": model, "display_name": deployed_model_display_name, "dedicated_resources": { "min_replica_count": MIN_NODES, "max_replica_count": MAX_NODES, "machine_spec": machine_spec, }, "disable_container_logging": False, } response = clients["endpoint"].deploy_model( endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split ) print("Long running operation:", response.operation.name) result = response.result() print("result") deployed_model = result.deployed_model print(" deployed_model") print(" id:", deployed_model.id) print(" model:", deployed_model.model) print(" display_name:", deployed_model.display_name) print(" create_time:", deployed_model.create_time) return deployed_model.id deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id) """ Explanation: Deploy Model resource to the Endpoint resource Use this helper function deploy_model to deploy the Model resource to the Endpoint resource you created for serving predictions, with the following parameters: model: The Vertex fully qualified model identifier of the model to upload (deploy) from the training pipeline. deploy_model_display_name: A human readable name for the deployed model. endpoint: The Vertex fully qualified endpoint identifier to deploy the model to. The helper function calls the Endpoint client service's method deploy_model, which takes the following parameters: endpoint: The Vertex fully qualified Endpoint resource identifier to deploy the Model resource to. deployed_model: The requirements specification for deploying the model. 
traffic_split: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs. If only one model, then specify as { "0": 100 }, where "0" refers to this model being uploaded and 100 means 100% of the traffic. If there are existing models on the endpoint, for which the traffic will be split, then use model_id to specify as { "0": percent, model_id: percent, ... }, where model_id is the model id of an existing model to the deployed endpoint. The percents must add up to 100. Let's now dive deeper into the deployed_model parameter. This parameter is specified as a Python dictionary with the minimum required fields: model: The Vertex fully qualified model identifier of the (upload) model to deploy. display_name: A human readable name for the deployed model. disable_container_logging: This disables logging of container events, such as execution failures (default is container logging is enabled). Container logging is typically enabled when debugging the deployment and then disabled when deployed for production. dedicated_resources: This refers to how many compute instances (replicas) that are scaled for serving prediction requests. machine_spec: The compute instance to provision. Use the variable you set earlier DEPLOY_GPU != None to use a GPU; otherwise only a CPU is allocated. min_replica_count: The number of compute instances to initially provision, which you set earlier as the variable MIN_NODES. max_replica_count: The maximum number of compute instances to scale to, which you set earlier as the variable MAX_NODES. Traffic Split Let's now dive deeper into the traffic_split parameter. This parameter is specified as a Python dictionary. This might at first be a tad bit confusing. Let me explain, you can deploy more than one instance of your model to an endpoint, and then set how much (percent) goes to each instance. Why would you do that? 
Perhaps you already have a previous version deployed in production -- let's call that v1. You got better model evaluation on v2, but you don't know for certain that it is really better until you deploy to production. So in the case of traffic split, you might want to deploy v2 to the same endpoint as v1, but it only get's say 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision. Response The method returns a long running operation response. We will wait sychronously for the operation to complete by calling the response.result(), which will block until the model is deployed. If this is the first time a model is deployed to the endpoint, it may take a few additional minutes to complete provisioning of resources. End of explanation """ test_image = x_test[0] test_label = y_test[0] print(test_image.shape) """ Explanation: Make a online prediction request Now do a online prediction to your deployed model. Get test item You will use an example out of the test (holdout) portion of the dataset as a test item. End of explanation """ import base64 import cv2 cv2.imwrite("tmp.jpg", (test_image * 255).astype(np.uint8)) bytes = tf.io.read_file("tmp.jpg") b64str = base64.b64encode(bytes.numpy()).decode("utf-8") """ Explanation: Prepare the request content You are going to send the CIFAR10 image as compressed JPG image, instead of the raw uncompressed bytes: cv2.imwrite: Use openCV to write the uncompressed image to disk as a compressed JPEG image. Denormalize the image data from [0,1) range back to [0,255). Convert the 32-bit floating point values to 8-bit unsigned integers. tf.io.read_file: Read the compressed JPG images back into memory as raw bytes. base64.b64encode: Encode the raw bytes into a base 64 encoded string. End of explanation """ def predict_image(image, endpoint, parameters_dict): # The format of each instance should conform to the deployed model's prediction input schema. 
instances_list = [{serving_input: {"b64": image}}] instances = [json_format.ParseDict(s, Value()) for s in instances_list] response = clients["prediction"].predict( endpoint=endpoint, instances=instances, parameters=parameters_dict ) print("response") print(" deployed_model_id:", response.deployed_model_id) predictions = response.predictions print("predictions") for prediction in predictions: print(" prediction:", prediction) predict_image(b64str, endpoint_id, None) """ Explanation: Send the prediction request Ok, now you have a test image. Use this helper function predict_image, which takes the following parameters: image: The test image data as a numpy array. endpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model resource was deployed to. parameters_dict: Additional parameters for serving. This function calls the prediction client service predict method with the following parameters: endpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model resource was deployed to. instances: A list of instances (encoded images) to predict. parameters: Additional parameters for serving. To pass the image data to the prediction service, in the previous step you encoded the bytes into base64 -- which makes the content safe from modification when transmitting binary data over the network. You need to tell the serving binary where your model is deployed to, that the content has been base64 encoded, so it will decode it on the other end in the serving binary. Each instance in the prediction request is a dictionary entry of the form: {serving_input: {'b64': content}} input_name: the name of the input layer of the underlying model. 'b64': A key that indicates the content is base64 encoded. content: The compressed JPG image bytes as a base64 encoded string. Since the predict() service can take multiple images (instances), you will send your single image as a list of one image. 
As a final step, you package the instances list into Google's protobuf format -- which is what we pass to the predict() service. The response object returns a list, where each element in the list corresponds to the corresponding image in the request. You will see in the output for each prediction: predictions: Confidence level for the prediction, between 0 and 1, for each of the classes. End of explanation """ def undeploy_model(deployed_model_id, endpoint): response = clients["endpoint"].undeploy_model( endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={} ) print(response) undeploy_model(deployed_model_id, endpoint_id) """ Explanation: Undeploy the Model resource Now undeploy your Model resource from the serving Endpoint resoure. Use this helper function undeploy_model, which takes the following parameters: deployed_model_id: The model deployment identifier returned by the endpoint service when the Model resource was deployed to. endpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model is deployed to. This function calls the endpoint client service's method undeploy_model, with the following parameters: deployed_model_id: The model deployment identifier returned by the endpoint service when the Model resource was deployed. endpoint: The Vertex fully qualified identifier for the Endpoint resource where the Model resource is deployed. traffic_split: How to split traffic among the remaining deployed models on the Endpoint resource. Since this is the only deployed model on the Endpoint resource, you simply can leave traffic_split empty by setting it to {}. 
End of explanation """ delete_dataset = True delete_pipeline = True delete_model = True delete_endpoint = True delete_batchjob = True delete_customjob = True delete_hptjob = True delete_bucket = True # Delete the dataset using the Vertex fully qualified identifier for the dataset try: if delete_dataset and "dataset_id" in globals(): clients["dataset"].delete_dataset(name=dataset_id) except Exception as e: print(e) # Delete the training pipeline using the Vertex fully qualified identifier for the pipeline try: if delete_pipeline and "pipeline_id" in globals(): clients["pipeline"].delete_training_pipeline(name=pipeline_id) except Exception as e: print(e) # Delete the model using the Vertex fully qualified identifier for the model try: if delete_model and "model_to_deploy_id" in globals(): clients["model"].delete_model(name=model_to_deploy_id) except Exception as e: print(e) # Delete the endpoint using the Vertex fully qualified identifier for the endpoint try: if delete_endpoint and "endpoint_id" in globals(): clients["endpoint"].delete_endpoint(name=endpoint_id) except Exception as e: print(e) # Delete the batch job using the Vertex fully qualified identifier for the batch job try: if delete_batchjob and "batch_job_id" in globals(): clients["job"].delete_batch_prediction_job(name=batch_job_id) except Exception as e: print(e) # Delete the custom job using the Vertex fully qualified identifier for the custom job try: if delete_customjob and "job_id" in globals(): clients["job"].delete_custom_job(name=job_id) except Exception as e: print(e) # Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job try: if delete_hptjob and "hpt_job_id" in globals(): clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id) except Exception as e: print(e) if delete_bucket and "BUCKET_NAME" in globals(): ! 
gsutil rm -r $BUCKET_NAME """ Explanation: Cleaning up To clean up all GCP resources used in this project, you can delete the GCP project you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: Dataset Pipeline Model Endpoint Batch Job Custom Job Hyperparameter Tuning Job Cloud Storage Bucket End of explanation """
junhwanjang/DataSchool
Lecture/02. 파이썬 프로그래밍/6) Numpy 시작하기.ipynb
mit
import numpy as np a = np.array([0, 1, 2, 3]) a """ Explanation: NumPy NumPy란 수치해석용 Python 라이브러리 C로 구현 (파이썬용 C라이브러리) BLAS/LAPACK 기반 빠른 수치 계산을 위한 Structured Array 제공 Home http://www.numpy.org/ Documentation http://docs.scipy.org/doc/ Tutorial http://www.scipy-lectures.org/intro/numpy/index.html https://docs.scipy.org/doc/numpy-dev/user/quickstart.html NumPy Array End of explanation """ L = range(1000) %timeit [i**2 for i in L] a = np.arange(1000) %timeit a**2 L = range(3) L L * 2 [i * 2 for i in L] a = np.arange(3) a a * 2 """ Explanation: Python List vs NumPy Array Python List 여러가지 타입의 원소 메모리 용량이 크고 속도가 느림 nesting 가능 전체 연산 불가 NumPy Array 동일 타입의 원소 메모리 최적화, 계산 속도 향상 크기(dimension)이 명확하게 정의 전체 연산 가능 End of explanation """ a = np.array([0, 1, 2, 3]) a a.ndim a.shape len(a) """ Explanation: Create Array (1D) End of explanation """ b = np.array([[0, 1, 2], [3, 4, 5]]) # 2 x 3 array b b.ndim b.shape len(b) a2 = np.array([[0, 1, 2, 3]]).T a2 a3 = np.array([[0], [1], [2], [3]]) a3 a2.shape """ Explanation: Create Array (2D) End of explanation """ c = np.array([[[1,2], [3,4]], [[5,6], [7,8]]]) c c.ndim c.shape len(c) """ Explanation: Create Array (3D) End of explanation """ a = np.arange(4) a a.shape b = np.array([[0, 1, 2, 3]]) b b.shape c = np.array([[0], [1], [2], [3]]) c c.shape """ Explanation: 1 dim vs 2 dim End of explanation """ a = np.array([[0, 1, 2, 3]]) a a.shape b = a.T b b.shape """ Explanation: Transpose End of explanation """ a = np.arange(10) # 0 .. n-1 (!) 
a b = np.arange(1, 9, 2) # start, end (exclusive), step b c = np.linspace(0, 1, 6) # start, end, num-points c d = np.linspace(0, 1, 5, endpoint=False) d a = np.ones((3, 3)) # reminder: (3, 3) is a tuple a b = np.zeros((2, 2)) b c = np.diag([1,2,3]) c d = np.eye(4) d a = np.array([0, 1, 2]) a np.tile(a, 2) np.tile(a, (3, 2)) np.tile(a, (2, 1, 2)) b = np.array([[1, 2], [3, 4]]) b np.tile(b, 2) np.tile(b, (2, 1)) """ Explanation: Array Creation Functions arange linspace, logspace zeros, ones rand, randn tile End of explanation """ a = np.arange(20) a b = np.reshape(a, (4, 5)) b c = a.reshape(4,5) c """ Explanation: Shape Change reshape flatten, ravel End of explanation """ a = np.arange(24) a.reshape(2, 12) a.reshape(2, -1) a.reshape(-1, 12) c d = c.flatten() # return a copy d d.base is None e = c.ravel() e e.base """ Explanation: 인수가 -1 numpy가 나머지 인수들을 이용하여 사이즈를 맞춘다. End of explanation """ a = np.arange(5) a np.vstack([a * 10, a * 20]) b = np.arange(5)[:, np.newaxis] b np.hstack([b * 10, b * 20]) a = np.array((1,2,3)) b = np.array((2,3,4)) a a.shape np.dstack((a,b)) a = np.array([[1],[2],[3]]) b = np.array([[2],[3],[4]]) np.dstack((a,b)) """ Explanation: Stack hstack vstack dstack End of explanation """ a = np.array([1, 2, 3]) a.dtype b = np.array([1., 2., 3.]) b.dtype c = np.array([1, 2, 3], dtype=np.float64) c.dtype d = np.array([1+2j, 3+4j, 5+6*1j]) d.dtype e = np.array([True, False, False, True]) e.dtype f = np.array(['Bonjour', 'Hello', 'Hallo',]) f.dtype """ Explanation: dtype bool Boolean (True or False) stored as a byte int8 Byte (-128 to 127) int16 Integer (-32768 to 32767) int32 Integer (-2147483648 to 2147483647) int64 Integer (-9223372036854775808 to 9223372036854775807) uint8 Unsigned integer (0 to 255) uint16 Unsigned integer (0 to 65535) uint32 Unsigned integer (0 to 4294967295) uint64 Unsigned integer (0 to 18446744073709551615) float16 Half precision float: sign bit, 5 bits exponent, 10 bits mantissa float32 Single precision float: sign bit, 8 bits 
exponent, 23 bits mantissa float64 Double precision float: sign bit, 11 bits exponent, 52 bits mantissa S String End of explanation """ x = np.array([1, -1, 0]) / np.array([0, 0, 0]) x x[0] np.inf, np.nan """ Explanation: NaN Not a Number Inf Infinity End of explanation """ a = np.arange(10) a a[0], a[2], a[-1] a[::-1] """ Explanation: Indexing End of explanation """ l = [[0,0,0],[0,1,0],[0,0,2]] l[1] l[1][1] a = np.diag(np.arange(3)) a a[1, 1] a[2, 1] = 10 # third line, second column a a[2] = [10, 20, 30] a """ Explanation: Multi-dimensional Indexing End of explanation """ a = np.arange(10) a a[2:9:3] # [start:end:step] a[:4] a[1:3] a[::2] a[3:] """ Explanation: Slicing End of explanation """ a = np.arange(6) + (np.arange(6) * 10)[:, np.newaxis] a a[0,:] a[:,0] """ Explanation: Multi-dimensional Slicing End of explanation """ a = np.arange(4) a a.shape b = np.arange(4).reshape(4,1) b b.shape c = np.arange(4)[:, np.newaxis] c c.shape """ Explanation: <img src="http://www.scipy-lectures.org/_images/numpy_indexing.png" style="width:70%; margin: 0 auto 0 auto;"> newaxis 차원 확장 End of explanation """ a = np.arange(10) a b = a[::2] b a[0] = 99 a b """ Explanation: View A slicing operation creates a view on the original array, which is just a way of accessing array data. Thus the original array is not copied in memory. End of explanation """ a = np.arange(5) a b = a.copy() b a[0] = 99 a b """ Explanation: Copy End of explanation """ a = np.arange(20) a a % 2 idx = (a % 2) == 0 idx a[idx] a[(a % 2) == 0] a = np.arange(50) * 10 a idx = [1, 3, 4, -1, 30] a[idx] a[[1,3,4,-1,30]] a = np.arange(6) + (np.arange(6) * 10)[:, np.newaxis] a a[[0,1,2,3,4],(1,2,3,4,5)] a[3:, [0,2,5]] """ Explanation: Fancy indexing 팬시 인덱싱 Boolean Fancy Indexing True인 원소만 선택 크기가 같아야 한다. list 또는 tuple, or array 지정된 인덱스만 선택 크기가 달라도 된다. 
multi dimension에도 사용 가능 create copy, not view End of explanation """ a = np.array([1, 2, 3, 4]) a a + 1 2**a b = np.ones(4) + 1 b a - b a + b c = np.ones((3, 3)) c c * c # element-wise, NOT Matrix product c.dot(c) # matrix product a = np.array([1, 2, 3, 4]) b = np.array([4, 2, 2, 4]) a == b a > b a = np.array([1, 2, 3, 4]) b = np.array([4, 2, 2, 4]) c = np.array([1, 2, 3, 4]) np.array_equal(a, b) np.array_equal(a, c) a = np.arange(5) np.sin(a) np.log(a) np.exp(a) np.log10(a) a = np.arange(4) b = np.array([1, 2]) a b a + b """ Explanation: Array Operation Elementwise operations End of explanation """ x = np.array([1, 2, 3, 4]) x np.sum(x) x.sum() x = np.array([[1, 1], [2, 2]]) x """ Explanation: Dimension Reduction Operation sum min, max, argmin, argmax mean, median, std, var all, any End of explanation """ x.sum() x.sum(axis=0) # columns (first dimension) x.sum(axis=1) # rows (second dimension) x = np.array([1, 3, 2]) x.min() x.max() x.argmin() # index of minimum x.argmax() # index of maximum np.all([True, True, False]) np.any([True, True, False]) a = np.zeros((100, 100), dtype=np.int) a np.any(a != 0) np.all(a == a) a = np.array([1, 2, 3, 2]) b = np.array([2, 2, 3, 2]) c = np.array([6, 4, 4, 5]) ((a <= b) & (b <= c)).all() x = np.array([1, 2, 3, 1]) y = np.array([[1, 2, 3], [5, 6, 1]]) x.mean() np.median(x) np.median(y, axis=-1) # last axis x.std() # full population standard dev. 
""" Explanation: <img src="http://www.scipy-lectures.org/_images/reductions.png", style="width: 20%; margin: 0 auto 0 auto;"> End of explanation """ a = np.tile(np.arange(0, 40, 10), (3, 1)).T a b = np.array([0, 1, 2]) b a + b a[:,0][:, np.newaxis] a[:,0][:, np.newaxis] + b a = np.ones((4, 5)) a a[0] a[0] = 2 a x, y = np.arange(5), np.arange(5)[:, np.newaxis] x y distance = np.sqrt(x ** 2 + y ** 2) distance """ Explanation: Broadcasting <img src="http://www.scipy-lectures.org/_images/numpy_broadcasting.png" style="width: 60%; margin: 0 auto 0 auto;"> End of explanation """ x, y = np.ogrid[0:3, 0:5] x y np.ogrid[-1:1:3j, -1:1:5j] x, y = np.mgrid[0:3, 0:5] x y np.mgrid[-1:1:3j, -1:1:5j] X, Y = np.meshgrid(np.arange(3), np.arange(5)) X Y zip(X.ravel(), Y.ravel()) plt.scatter(*np.vstack(np.meshgrid(np.linspace(-1,1,10), np.linspace(-2,2,10))).reshape(2,-1).tolist()) """ Explanation: ogrid, mgrid, meshgrid End of explanation """ a = np.array([[4, 3, 5], [1, 2, 1]]) a a[:,0] b = np.sort(a, axis=0) b b = np.sort(a, axis=1) b a = np.array([4, 3, 1, 2]) j = np.argsort(a) j a[j] """ Explanation: sort End of explanation """ x = range(10) x import math math.exp(x) math.exp(x[0]) [math.exp(x_i) for x_i in x] np.exp(x) """ Explanation: Array용 수학 함수 universal function 빠른 element-wise (vectorized) 연산 모든 NumPy/Scipy 수학 함수는 자동으로 vectorized 연산 수행 End of explanation """ np.random.seed(0) """ Explanation: Random Number numpy.random 서브패키지 seed: pseudo random 상태 설정 shuffle: 조합(combination) choice: 순열(permutation) 및 조합(combination) rand: uniform random_integers: uniform integer randn: Gaussina normal seed 컴퓨터의 랜덤 생성은 사실 랜덤이 아니다. 랜덤처럼 보이지만 정해진 알고리즘에 의해 생성되는 규칙적인 순열 시작점이 정해지면 랜덤 함수를 사용해도 정해진 숫자가 나온다. End of explanation """ x = np.arange(10) np.random.shuffle(x) x """ Explanation: numpy.random.shuffle(x) Parameters: x : array_like The array or list to be shuffled. 
End of explanation """ # same as shuffle np.random.choice(5, 5, replace=False) np.random.choice(5, 3, replace=False) np.random.choice(5, 10) np.random.choice(5, 10, p=[0.1, 0, 0.3, 0.6, 0]) x = np.random.rand(10000) print(x[:10]) sns.distplot(x) np.random.rand(3,2) x = np.random.random_integers(-100, 100, 50) sns.distplot(x, rug=True) x = np.random.randn(1000) sns.distplot(x, rug=True) np.random.randn(3,4) """ Explanation: numpy.random.choice(a, size=None, replace=True, p=None) Parameters: a : 1-D array-like or int If an ndarray, a random sample is generated from its elements. If an int, the random sample is generated as if a was np.arange(n) size : int or tuple of ints, optional Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn. Default is None, in which case a single value is returned. replace : boolean, optional Whether the sample is with or without replacement p : 1-D array-like, optional The probabilities associated with each entry in a. If not given the sample assumes a uniform distribution over all entries in a. Returns: samples : 1-D ndarray, shape (size,) The generated random samples End of explanation """ np.unique([11, 11, 2, 2, 34, 34]) a = np.array([[1, 1], [2, 3]]) np.unique(a) a = np.array(['a', 'b', 'b', 'c', 'a']) index, count = np.unique(a, return_counts=True) count index np.bincount([1, 1, 2, 2, 3, 3], minlength=6) np.histogram([1.1, 2.5, 1.8, 2.4, 0.7], bins=[0, 1, 2, 3]) np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) np.histogram(np.arange(4), bins=np.arange(5), density=True) """ Explanation: random number count discrete values unique() bincount() continuous values histogram() End of explanation """
awitney/2017
hic_workshop_2017/WD/Basic_HiC_analysis.ipynb
gpl-3.0
# This is regular Python comment inside Jupyter "Code" cell. # You can easily run "Hello world" in the "Code" cell (focus on the cell and press Shift+Enter): print("Hello world!") """ Explanation: <a id="navigation"></a> Hi-C data analysis Welcome to the Jupyter notebook dedicated to Hi-C data analysis. Here we will be working in interactive Python environment with some mixture of bash command line tools. Here is the outline of what we are going to do: Notebook basics Reads maping Data filtering Binning Hi-C data visualisation Iterative correction Compartments and TADs If you have any questions, please, contact Aleksandra Galitsyna (agalitzina@gmail.com) <a id="basics"></a> 0. Notebook basics If you are new to Python and Jupyter notebook, please, take a quick look through this small list of tips. First of all, Jupyter notebook is organised in cells, which may contain text, comments and code blocks of any size. End of explanation """ %%bash echo "Current directory is: "; pwd echo "List of files in the current directory is: "; ls """ Explanation: There are also other types of cells, for example, "Markdown". Double click this cell to view raw Markdown markup content. You can define functions, classes, run pipelines and visualisations, run thousands of code lines inside a Jupyter cell. But usually, it is convenient to write simple and clean blocks of code. Note that behind this interactive notebook you have regular Python session running. Thus Python variables are accessible only throughout your history of actions in the notebook. To create a variable, you have to execute the corresponding block of code. All your variables will be lost when you restart the kernel of the notebook. You can pause or stop the kernel, save notebook (.ipynb) file, copy and insert cells via buttons in the toolbar. Please, take a look at these useful buttons. Also, try pressing 'Esc' and then 'h'. You will see shortcuts help. Jupyter notebook allows you to create "magical" cells. 
We will use %%bash, %%capture, %matplotlib. For example, %%bash magic makes it easier to access bash commands: End of explanation """ # Module import under custom name import numpy as np # You've started asking questions about it np? """ Explanation: If you are not sure about the function, class or variable then use its name with '?' at the end to get available documentation. Here is an example for common module numpy: End of explanation """ %%bash head -n 8 '../DATA/FASTQ/K562_B-bulk_R1.fastq' %%bash head -n 8 '../DATA/FASTQ/K562_B-bulk_R2.fastq' """ Explanation: OK, it seems that now we are ready to start our Hi-C data analysis! I've placed Go top shortcut for you in each section so that you can navigate quickly throughout the notebook. <a id="mapping"></a> 1. Reads mapping Go top 1.1 Input raw data Hi-C results in paired-end sequencing, where each pair represents one possible contact. The analysis starts with raw sequencing data (.fastq files). I've downloaded raw files from Flyamer et al. 2017 (GEO ID GSE80006) and placed them in the DATA/FASTQ/ directory. We can view these files easily with bash help. Forward and reverse reads, correspondingly: End of explanation """ %%bash ls ../GENOMES/HG19_FASTA """ Explanation: 1.2 Genome Now we have to map these reads to the genome of interest (Homo sapiens hg19 downloaded from UCSC in this case). We are going to use only chromosome 1 to minimise computational time. The genome is also pre-downloaded: End of explanation """ #%%bash #bowtie2-build /home/jovyan/GENOMES/HG19_FASTA/chr1.fa /home/jovyan/GENOMES/HG19_IND/hg19_chr1 #Time consuming step %%bash ls ../GENOMES/HG19_IND """ Explanation: For Hi-C data mapping we will use hiclib. It utilizes bowtie 2 read mapping software. Bowtie 2 indexes the genome prior to reads mapping in order to reduce memory usage. Usually, you have to run genome indexing, but I've already done this time-consuming step. That's why code for this step is included but commented. 
End of explanation """ import os from hiclib import mapping from mirnylib import h5dict, genome """ Explanation: 1.3 Iterative mapping First of all, we need to import useful Python packages: End of explanation """ %%bash which bowtie2 # Bowtie 2 path %%bash pwd # Current working directory path # Setting parameters and environmental variables bowtie_path = '/opt/conda/bin/bowtie2' enzyme = 'DpnII' bowtie_index_path = '/home/jovyan/GENOMES/HG19_IND/hg19_chr1' fasta_path = '/home/jovyan/GENOMES/HG19_FASTA/' chrms = ['1'] # Reading the genome genome_db = genome.Genome(fasta_path, readChrms=chrms) # Creating directories for further data processing if not os.path.exists('tmp/'): os.mkdir('tmp/', exists_) if not os.path.exists('../DATA/SAM/'): os.mkdir('../DATA/SAM/') # Set parameters for iterative mapping min_seq_len = 25 len_step = 5 nthreads = 2 temp_dir = 'tmp' bowtie_flags = '--very-sensitive' infile1 = '/home/jovyan/DATA/FASTQ1/K562_B-bulk_R1.fastq' infile2 = '/home/jovyan/DATA/FASTQ1/K562_B-bulk_R2.fastq' out1 = '/home/jovyan/DATA/SAM/K562_B-bulk_R1.chr1.sam' out2 = '/home/jovyan/DATA/SAM/K562_B-bulk_R2.chr1.sam' # Iterative mapping itself. Time consuming step! 
mapping.iterative_mapping( bowtie_path = bowtie_path, bowtie_index_path = bowtie_index_path, fastq_path = infile1, out_sam_path = out1, min_seq_len = min_seq_len, len_step = len_step, nthreads = nthreads, temp_dir = temp_dir, bowtie_flags = bowtie_flags) mapping.iterative_mapping( bowtie_path = bowtie_path, bowtie_index_path = bowtie_index_path, fastq_path = infile2, out_sam_path = out2, min_seq_len = min_seq_len, len_step = len_step, nthreads = nthreads, temp_dir = temp_dir, bowtie_flags = bowtie_flags) """ Explanation: Then we need to set some parameters and prepare our environment: End of explanation """ %%bash ls /home/jovyan/DATA/SAM/ %%bash head -n 10 /home/jovyan/DATA/SAM/K562_B-bulk_R1.chr1.sam.25 """ Explanation: Let's take a look at .sam files that were created during iterative mapping: End of explanation """ # Create the directory for output if not os.path.exists('../DATA/HDF5/'): os.mkdir('../DATA/HDF5/') # Define file name for output out = '/home/jovyan/DATA/HDF5/K562_B-bulk.fragments.hdf5' # Open output file mapped_reads = h5dict.h5dict(out) # Parse mapping data and write to output file mapping.parse_sam( sam_basename1 = out1, sam_basename2 = out2, out_dict = mapped_reads, genome_db = genome_db, enzyme_name = enzyme, save_seqs = False, keep_ids = False) """ Explanation: 1.4 Making sense of mapping output For each read length and orientation, we have a file. 
Now we need to merge them into the single dataset (.hdf5 file): End of explanation """ %%bash ls /home/jovyan/DATA/HDF5/ import h5py # Reading the file a = h5py.File('/home/jovyan/DATA/HDF5/K562_B-bulk.fragments.hdf5') # "a" variable has dictionary-like structure, we can view its keys, for example: list( a.keys() ) # Mapping positions for forward reads are stored under 'cuts1' key: a['cuts1'].value """ Explanation: Let's take a look at the created file: End of explanation """ from hiclib import fragmentHiC inp = '/home/jovyan/DATA/HDF5/K562_B-bulk.fragments.hdf5' out = '/home/jovyan/DATA/HDF5/K562_B-bulk.fragments_filtered.hdf5' # Create output file fragments = fragmentHiC.HiCdataset( filename = out, genome = genome_db, maximumMoleculeLength= 500, mode = 'w') # Parse input data fragments.parseInputData( dictLike=inp) # Filtering fragments.filterRsiteStart(offset=5) # reads map too close to restriction site fragments.filterDuplicates() # remove PCR duplicates fragments.filterLarge() # remove too large restriction fragments fragments.filterExtreme(cutH=0.005, cutL=0) # remove fragments with too high and low counts # Some hidden filteres were also applied, we can check them all: fragments.printMetadata() """ Explanation: <a id="filtering"></a> 2. Data filtering Go top The raw Hi-C data is mapped and interpreted, the next step is to filter out possible methodological artefacts: End of explanation """ import pandas as pd df_stat = pd.DataFrame(list(fragments.metadata.items()), columns=['Feature', 'Count']) df_stat df_stat['Ratio of total'] = 100*df_stat['Count']/df_stat.loc[2,'Count'] df_stat """ Explanation: Nice visualisation of the data: End of explanation """ # Define file name for binned data. 
Note "{}" prepared for string formatting out_bin = '/home/jovyan/DATA/HDF5/K562_B-bulk.binned_{}.hdf5' res_kb = [100, 20] # Several resolutions in Kb for res in res_kb: print(res) outmap = out_bin.format(str(res)+'kb') # String formatting fragments.saveHeatmap(outmap, res*1000) # Save heatmap del fragments # delete unwanted object """ Explanation: <a id="binning"></a> 3. Data binning Go top The previous analysis involved interactions of restriction fragments, now we would like to work with interactions of genomic bins. End of explanation """ # Importing visualisation modules import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns sns.set_style('ticks') %matplotlib inline from hiclib.binnedData import binnedDataAnalysis res = 100 # Resolution in Kb # prepare to read the data data_hic = binnedDataAnalysis(resolution=res*1000, genome=genome_db) # read the data data_hic.simpleLoad('/home/jovyan/DATA/HDF5/K562_B-bulk.binned_{}.hdf5'.format(str(res)+'kb'),'hic') mtx = data_hic.dataDict['hic'] # show heatmap plt.figure(figsize=[15,15]) plt.imshow(mtx[0:200, 0:200], cmap='jet', interpolation='None') """ Explanation: <a id="visualisation"></a> 4. Hi-C data visualisation Go top Let's take a look at the resulting heat maps. End of explanation """ # Additional data filtering data_hic.removeDiagonal() data_hic.removePoorRegions() data_hic.removeZeros() data_hic.iterativeCorrectWithoutSS(force=True) data_hic.restoreZeros() mtx = data_hic.dataDict['hic'] plt.figure(figsize=[15,15]) plt.imshow(mtx[200:500, 200:500], cmap='jet', interpolation='None') """ Explanation: <a id="correction"></a> 5. Iterative correction Go top The next typical step is data correction for unequal amplification and accessibility of genomic regions. We will use iterative correction. End of explanation """ # Load compartments computed previously based on K562 dataset from Rao et al. 
2014 eig = np.loadtxt('/home/jovyan/DATA/ANNOT/comp_K562_100Kb_chr1.tsv') eig from matplotlib import gridspec bgn = 0 end = 500 fig = plt.figure(figsize=(10,10)) gs = gridspec.GridSpec(2, 1, height_ratios=[20,2]) gs.update(wspace=0.0, hspace=0.0) ax = plt.subplot(gs[0,0]) ax.matshow(mtx[bgn:end, bgn:end], cmap='jet', origin='lower', aspect='auto') ax.set_xticks([]) ax.set_yticks([]) axl = plt.subplot(gs[1,0]) plt.plot(range(end-bgn), eig[bgn:end] ) plt.xlim(0, end-bgn) plt.xlabel('Eigenvector values') ticks = range(bgn, end+1, 100) ticklabels = ['{} Kb'.format(x) for x in ticks] plt.xticks(ticks, ticklabels) print('') """ Explanation: <a id="meta"></a> 7. Compartmanets and TADs Go top 7.1 Comparison with compartments Compartments usually can be found at whole-genome datasets, but we have only chromosome 1. Still, we can try to find some visual signs of compartments. End of explanation """ mtx_Rao = np.genfromtxt('../DATA/ANNOT/Rao_K562_chr1.csv', delimiter=',') bgn = 0 end = 500 fig = plt.figure(figsize=(10,10)) gs = gridspec.GridSpec(2, 1, height_ratios=[20,2]) gs.update(wspace=0.0, hspace=0.0) ax = plt.subplot(gs[0,0]) ax.matshow(mtx_Rao[bgn:end, bgn:end], cmap='jet', origin='lower', aspect='auto', vmax=1000) ax.set_xticks([]) ax.set_yticks([]) axl = plt.subplot(gs[1,0]) plt.plot(range(end-bgn), eig[bgn:end] ) plt.xlim(0, end-bgn) plt.xlabel('Eigenvector values') ticks = range(bgn, end+1, 100) ticklabels = ['{} Kb'.format(x) for x in ticks] plt.xticks(ticks, ticklabels) print('') """ Explanation: Seems to be nothing special with compartments. What if we had much better coverage by reads? Let's take a look at the dataset from Rao et al. 
2014, GEO GSE63525, HIC069: End of explanation """ # Import Python package import lavaburst good_bins = mtx.astype(bool).sum(axis=0) > 1 # We have to mask rows/cols if data is missing gam=[0.15, 0.25, 0.5, 0.75, 1.0] # set of parameters gamma for TADs calling segments_dict = {} for gam_current in gam: print(gam_current) S = lavaburst.scoring.armatus_score(mtx, gamma=gam_current, binmask=good_bins) model = lavaburst.model.SegModel(S) segments = model.optimal_segmentation() # Positions of TADs for input matrix segments_dict[gam_current] = segments.copy() A = mtx.copy() good_bins = A.astype(bool).sum(axis=0) > 0 At = lavaburst.utils.tilt_heatmap(mtx, n_diags=100) start_tmp = 0 end_tmp = 500 f = plt.figure(figsize=(20, 6)) ax = f.add_subplot(111) blues = sns.cubehelix_palette(0.4, gamma=0.5, rot=-0.3, dark=0.1, light=0.9, as_cmap=True) ax.matshow(np.log(At[start_tmp: end_tmp]), cmap=blues) cmap = mpl.cm.get_cmap('brg') gammas = segments_dict.keys() for n, gamma in enumerate(gammas): segments = segments_dict[gamma] for a in segments[:-1]: if a[1]<start_tmp or a[0]>end_tmp: continue ax.plot([a[0]-start_tmp, a[0]+(a[1]-a[0])/2-start_tmp], [0, -(a[1]-a[0])], c=cmap(n/len(gammas)), alpha=0.5) ax.plot([a[0]+(a[1]-a[0])/2-start_tmp, a[1]-start_tmp], [-(a[1]-a[0]), 0], c=cmap(n/len(gammas)), alpha=0.5) a = segments[-1] ax.plot([a[0]-start_tmp, a[0]+(a[1]-a[0])/2-start_tmp], [0, -(a[1]-a[0])], c=cmap(n/len(gammas)), alpha=0.5, label=gamma) ax.plot([a[0]+(a[1]-a[0])/2-start_tmp, a[1]-start_tmp], [-(a[1]-a[0]), 0], c=cmap(n/len(gammas)), alpha=0.5) ax.set_xlim([0,end_tmp-start_tmp]) ax.set_ylim([100,-100]) ax.legend(bbox_to_anchor=(1.1, 1.05)) ax.set_aspect(0.5) #Let's check what are median TAD sized with different parameters: for gam_current in gam: segments = segments_dict[gam_current] tad_lens = segments[:,1]-segments[:,0] good_lens = (tad_lens>=200/res)&(tad_lens<100) print(res*1000*np.mean(tad_lens[good_lens])) """ Explanation: 7.2 Topologically associating domains (TADs) 
For TADs calling we will use lavaburst package. The code below is based on this example. End of explanation """
maciejkula/lightfm
examples/stackexchange/hybrid_crossvalidated.ipynb
apache-2.0
import numpy as np from lightfm.datasets import fetch_stackexchange data = fetch_stackexchange('crossvalidated', test_set_fraction=0.1, indicator_features=False, tag_features=True) train = data['train'] test = data['test'] """ Explanation: Item cold-start: recommending StackExchange questions In this example we'll use the StackExchange dataset to explore recommendations under item-cold start. Data dumps from the StackExchange network are available at https://archive.org/details/stackexchange, and we'll use one of them --- for stats.stackexchange.com --- here. The consists of users answering questions: in the user-item interaction matrix, each user is a row, and each question is a column. Based on which users answered which questions in the training set, we'll try to recommend new questions in the training set. Let's start by loading the data. We'll use the datasets module. End of explanation """ print('The dataset has %s users and %s items, ' 'with %s interactions in the test and %s interactions in the training set.' % (train.shape[0], train.shape[1], test.getnnz(), train.getnnz())) """ Explanation: Let's examine the data: End of explanation """ # Import the model from lightfm import LightFM # Set the number of threads; you can increase this # ify you have more physical cores available. NUM_THREADS = 2 NUM_COMPONENTS = 30 NUM_EPOCHS = 3 ITEM_ALPHA = 1e-6 # Let's fit a WARP model: these generally have the best performance. model = LightFM(loss='warp', item_alpha=ITEM_ALPHA, no_components=NUM_COMPONENTS) # Run 3 epochs and time it. %time model = model.fit(train, epochs=NUM_EPOCHS, num_threads=NUM_THREADS) """ Explanation: The training and test set are divided chronologically: the test set contains the 10% of interactions that happened after the 90% in the training set. This means that many of the questions in the test set have no interactions. 
This is an accurate description of a questions answering system: it is most important to recommend questions that have not yet been answered to the expert users who can answer them. A pure collaborative filtering model This is clearly a cold-start scenario, and so we can expect a traditional collaborative filtering model to do very poorly. Let's check if that's the case: End of explanation """ # Import the evaluation routines from lightfm.evaluation import auc_score # Compute and print the AUC score train_auc = auc_score(model, train, num_threads=NUM_THREADS).mean() print('Collaborative filtering train AUC: %s' % train_auc) """ Explanation: As a means of sanity checking, let's calculate the model's AUC on the training set first. If it's reasonably high, we can be sure that the model is not doing anything stupid and is fitting the training data well. End of explanation """ # We pass in the train interactions to exclude them from predictions. # This is to simulate a recommender system where we do not # re-recommend things the user has already interacted with in the train # set. test_auc = auc_score(model, test, train_interactions=train, num_threads=NUM_THREADS).mean() print('Collaborative filtering test AUC: %s' % test_auc) """ Explanation: Fantastic, the model is fitting the training set well. But what about the test set? End of explanation """ # Set biases to zero model.item_biases *= 0.0 test_auc = auc_score(model, test, train_interactions=train, num_threads=NUM_THREADS).mean() print('Collaborative filtering test AUC: %s' % test_auc) """ Explanation: This is terrible: we do worse than random! This is not very surprising: as there is no training data for the majority of the test questions, the model cannot compute reasonable representations of the test set items. The fact that we score them lower than other items (AUC < 0.5) is due to estimated per-item biases, which can be confirmed by setting them to zero and re-evaluating the model. 
End of explanation """ item_features = data['item_features'] tag_labels = data['item_feature_labels'] print('There are %s distinct tags, with values like %s.' % (item_features.shape[1], tag_labels[:3].tolist())) """ Explanation: A hybrid model We can do much better by employing LightFM's hybrid model capabilities. The StackExchange data comes with content information in the form of tags users apply to their questions: End of explanation """ # Define a new model instance model = LightFM(loss='warp', item_alpha=ITEM_ALPHA, no_components=NUM_COMPONENTS) # Fit the hybrid model. Note that this time, we pass # in the item features matrix. model = model.fit(train, item_features=item_features, epochs=NUM_EPOCHS, num_threads=NUM_THREADS) """ Explanation: We can use these features (instead of an identity feature matrix like in a pure CF model) to estimate a model which will generalize better to unseen examples: it will simply use its representations of item features to infer representations of previously unseen questions. Let's go ahead and fit a model of this type. End of explanation """ # Don't forget the pass in the item features again! train_auc = auc_score(model, train, item_features=item_features, num_threads=NUM_THREADS).mean() print('Hybrid training set AUC: %s' % train_auc) """ Explanation: As before, let's sanity check the model on the training set. End of explanation """ test_auc = auc_score(model, test, train_interactions=train, item_features=item_features, num_threads=NUM_THREADS).mean() print('Hybrid test set AUC: %s' % test_auc) """ Explanation: Note that the training set AUC is lower than in a pure CF model. This is fine: by using a lower-rank item feature matrix, we have effectively regularized the model, giving it less freedom to fit the training data. 
Despite this the model does much better on the test set: End of explanation """ def get_similar_tags(model, tag_id): # Define similarity as the cosine of the angle # between the tag latent vectors # Normalize the vectors to unit length tag_embeddings = (model.item_embeddings.T / np.linalg.norm(model.item_embeddings, axis=1)).T query_embedding = tag_embeddings[tag_id] similarity = np.dot(tag_embeddings, query_embedding) most_similar = np.argsort(-similarity)[1:4] return most_similar for tag in (u'bayesian', u'regression', u'survival'): tag_id = tag_labels.tolist().index(tag) print('Most similar tags for %s: %s' % (tag_labels[tag_id], tag_labels[get_similar_tags(model, tag_id)])) """ Explanation: This is as expected: because items in the test set share tags with items in the training set, we can provide better test set recommendations by using the tag representations learned from training. Bonus: tag embeddings One of the nice properties of the hybrid model is that the estimated tag embeddings capture semantic characteristics of the tags. Like the word2vec model, we can use this property to explore semantic tag similarity: End of explanation """
irockafe/revo_healthcare
notebooks/MTBLS315/exploratory/MTBLS315_uhplc_pos_classifer-4ppm.ipynb
mit
import time import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np from sklearn import preprocessing from sklearn.ensemble import RandomForestClassifier from sklearn.cross_validation import StratifiedShuffleSplit from sklearn.cross_validation import cross_val_score #from sklearn.model_selection import StratifiedShuffleSplit #from sklearn.model_selection import cross_val_score from sklearn.ensemble import AdaBoostClassifier from sklearn.metrics import roc_curve, auc from sklearn.utils import shuffle from scipy import interp %matplotlib inline def remove_zero_columns(X, threshold=1e-20): # convert zeros to nan, drop all nan columns, the replace leftover nan with zeros X_non_zero_colum = X.replace(0, np.nan).dropna(how='all', axis=1).replace(np.nan, 0) #.dropna(how='all', axis=0).replace(np.nan,0) return X_non_zero_colum def zero_fill_half_min(X, threshold=1e-20): # Fill zeros with 1/2 the minimum value of that column # input dataframe. Add only to zero values # Get a vector of 1/2 minimum values half_min = X[X > threshold].min(axis=0)*0.5 # Add the half_min values to a dataframe where everything that isn't zero is NaN. # then convert NaN's to 0 fill_vals = (X[X < threshold] + half_min).fillna(value=0) # Add the original dataframe to the dataframe of zeros and fill-values X_zeros_filled = X + fill_vals return X_zeros_filled toy = pd.DataFrame([[1,2,3,0], [0,0,0,0], [0.5,1,0,0]], dtype=float) toy_no_zeros = remove_zero_columns(toy) toy_filled_zeros = zero_fill_half_min(toy_no_zeros) print toy print toy_no_zeros print toy_filled_zeros """ Explanation: <h2> 4ppm </h2> Enough retcor groups, and fewer peak insertion problems than 4.5 or 5ppm. 
End of explanation """ ### Subdivide the data into a feature table data_path = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revo_healthcare/data/processed/MTBLS315/'\ 'uhplc_pos/xcms_result_4.csv' ## Import the data and remove extraneous columns df = pd.read_csv(data_path, index_col=0) df.shape df.head() # Make a new index of mz:rt mz = df.loc[:,"mz"].astype('str') rt = df.loc[:,"rt"].astype('str') idx = mz+':'+rt df.index = idx df # separate samples from xcms/camera things to make feature table not_samples = ['mz', 'mzmin', 'mzmax', 'rt', 'rtmin', 'rtmax', 'npeaks', 'uhplc_pos', ] samples_list = df.columns.difference(not_samples) mz_rt_df = df[not_samples] # convert to samples x features X_df_raw = df[samples_list].T # Remove zero-full columns and fill zeroes with 1/2 minimum values X_df = remove_zero_columns(X_df_raw) X_df_zero_filled = zero_fill_half_min(X_df) print "original shape: %s \n# zeros: %f\n" % (X_df_raw.shape, (X_df_raw < 1e-20).sum().sum()) print "zero-columns repalced? 
shape: %s \n# zeros: %f\n" % (X_df.shape, (X_df < 1e-20).sum().sum()) print "zeros filled shape: %s \n#zeros: %f\n" % (X_df_zero_filled.shape, (X_df_zero_filled < 1e-20).sum().sum()) # Convert to numpy matrix to play nicely with sklearn X = X_df.as_matrix() print X.shape """ Explanation: <h2> Import the dataframe and remove any features that are all zero </h2> End of explanation """ # Get mapping between sample name and assay names path_sample_name_map = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revo_healthcare/data/raw/'\ 'MTBLS315/metadata/a_UPLC_POS_nmfi_and_bsi_diagnosis.txt' # Index is the sample name sample_df = pd.read_csv(path_sample_name_map, sep='\t', index_col=0) sample_df = sample_df['MS Assay Name'] sample_df.shape print sample_df.head(10) # get mapping between sample name and sample class path_sample_class_map = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revo_healthcare/data/raw/'\ 'MTBLS315/metadata/s_NMFI and BSI diagnosis.txt' class_df = pd.read_csv(path_sample_class_map, sep='\t') # Set index as sample name class_df.set_index('Sample Name', inplace=True) class_df = class_df['Factor Value[patient group]'] print class_df.head(10) # convert all non-malarial classes into a single classes # (collapse non-malarial febril illness and bacteremia together) class_map_df = pd.concat([sample_df, class_df], axis=1) class_map_df.rename(columns={'Factor Value[patient group]': 'class'}, inplace=True) class_map_df binary_class_map = class_map_df.replace(to_replace=['non-malarial febrile illness', 'bacterial bloodstream infection' ], value='non-malarial fever') binary_class_map # convert classes to numbers le = preprocessing.LabelEncoder() le.fit(binary_class_map['class']) y = le.transform(binary_class_map['class']) """ Explanation: <h2> Get mappings between sample names, file names, and sample classes </h2> End of explanation """ def rf_violinplot(X, y, n_iter=25, test_size=0.3, random_state=1, n_estimators=1000): cross_val_skf = StratifiedShuffleSplit(y, 
n_iter=n_iter, test_size=test_size, random_state=random_state) clf = RandomForestClassifier(n_estimators=n_estimators, random_state=random_state) scores = cross_val_score(clf, X, y, cv=cross_val_skf) sns.violinplot(scores,inner='stick') rf_violinplot(X,y) # TODO - Switch to using caret for this bs..? # Do multi-fold cross validation for adaboost classifier def adaboost_violinplot(X, y, n_iter=25, test_size=0.3, random_state=1, n_estimators=200): cross_val_skf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, random_state=random_state) clf = AdaBoostClassifier(n_estimators=n_estimators, random_state=random_state) scores = cross_val_score(clf, X, y, cv=cross_val_skf) sns.violinplot(scores,inner='stick') adaboost_violinplot(X,y) # TODO PQN normalization, and log-transformation, # and some feature selection (above certain threshold of intensity, use principal components), et def pqn_normalize(X, integral_first=False, plot=False): ''' Take a feature table and run PQN normalization on it ''' # normalize by sum of intensities in each sample first. 
Not necessary if integral_first: sample_sums = np.sum(X, axis=1) X = (X / sample_sums[:,np.newaxis]) # Get the median value of each feature across all samples mean_intensities = np.median(X, axis=0) # Divde each feature by the median value of each feature - # these are the quotients for each feature X_quotients = (X / mean_intensities[np.newaxis,:]) if plot: # plot the distribution of quotients from one sample for i in range(1,len(X_quotients[:,1])): print 'allquotients reshaped!\n\n', #all_quotients = X_quotients.reshape(np.prod(X_quotients.shape)) all_quotients = X_quotients[i,:] print all_quotients.shape x = np.random.normal(loc=0, scale=1, size=len(all_quotients)) sns.violinplot(all_quotients) plt.title("median val: %f\nMax val=%f" % (np.median(all_quotients), np.max(all_quotients))) plt.plot( title="median val: ")#%f" % np.median(all_quotients)) plt.xlim([-0.5, 5]) plt.show() # Define a quotient for each sample as the median of the feature-specific quotients # in that sample sample_quotients = np.median(X_quotients, axis=1) # Quotient normalize each samples X_pqn = X / sample_quotients[:,np.newaxis] return X_pqn # Make a fake sample, with 2 samples at 1x and 2x dilutions X_toy = np.array([[1,1,1,], [2,2,2], [3,6,9], [6,12,18]], dtype=float) print X_toy print X_toy.reshape(1, np.prod(X_toy.shape)) X_toy_pqn_int = pqn_normalize(X_toy, integral_first=True, plot=True) print X_toy_pqn_int print '\n\n\n' X_toy_pqn = pqn_normalize(X_toy) print X_toy_pqn """ Explanation: <h2> Plot the distribution of classification accuracy across multiple cross-validation splits - Kinda Dumb</h2> Turns out doing this is kind of dumb, because you're not taking into account the prediction score your classifier assigned. Use AUC's instead. 
You want to give your classifier a lower score if it is really confident and wrong, than vice-versa End of explanation """ X_pqn = pqn_normalize(X) print X_pqn """ Explanation: <h2> pqn normalize your features </h2> End of explanation """ rf_violinplot(X_pqn, y) # Do multi-fold cross validation for adaboost classifier adaboost_violinplot(X_pqn, y) """ Explanation: <h2>Random Forest & adaBoost with PQN-normalized data</h2> End of explanation """ X_pqn_nlog = np.log(X_pqn) rf_violinplot(X_pqn_nlog, y) adaboost_violinplot(X_pqn_nlog, y) def roc_curve_cv(X, y, clf, cross_val, path='/home/irockafe/Desktop/roc.pdf', save=False, plot=True): t1 = time.time() # collect vals for the ROC curves tpr_list = [] mean_fpr = np.linspace(0,1,100) auc_list = [] # Get the false-positive and true-positive rate for i, (train, test) in enumerate(cross_val): clf.fit(X[train], y[train]) y_pred = clf.predict_proba(X[test])[:,1] # get fpr, tpr fpr, tpr, thresholds = roc_curve(y[test], y_pred) roc_auc = auc(fpr, tpr) #print 'AUC', roc_auc #sns.plt.plot(fpr, tpr, lw=10, alpha=0.6, label='ROC - AUC = %0.2f' % roc_auc,) #sns.plt.show() tpr_list.append(interp(mean_fpr, fpr, tpr)) tpr_list[-1][0] = 0.0 auc_list.append(roc_auc) if (i % 10 == 0): print '{perc}% done! 
{time}s elapsed'.format(perc=100*float(i)/cross_val.n_iter, time=(time.time() - t1)) # get mean tpr and fpr mean_tpr = np.mean(tpr_list, axis=0) # make sure it ends up at 1.0 mean_tpr[-1] = 1.0 mean_auc = auc(mean_fpr, mean_tpr) std_auc = np.std(auc_list) if plot: # plot mean auc plt.plot(mean_fpr, mean_tpr, label='Mean ROC - AUC = %0.2f $\pm$ %0.2f' % (mean_auc, std_auc), lw=5, color='b') # plot luck-line plt.plot([0,1], [0,1], linestyle = '--', lw=2, color='r', label='Luck', alpha=0.5) # plot 1-std std_tpr = np.std(tpr_list, axis=0) tprs_upper = np.minimum(mean_tpr + std_tpr, 1) tprs_lower = np.maximum(mean_tpr - std_tpr, 0) plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.2, label=r'$\pm$ 1 stdev') plt.xlim([-0.05, 1.05]) plt.ylim([-0.05, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC curve, {iters} iterations of {cv} cross validation'.format( iters=cross_val.n_iter, cv='{train}:{test}'.format(test=cross_val.test_size, train=(1-cross_val.test_size))) ) plt.legend(loc="lower right") if save: plt.savefig(path, format='pdf') plt.show() return tpr_list, auc_list, mean_fpr rf_estimators = 1000 n_iter = 3 test_size = 0.3 random_state = 1 cross_val_rf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, random_state=random_state) clf_rf = RandomForestClassifier(n_estimators=rf_estimators, random_state=random_state) rf_graph_path = '''/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revolutionizing_healthcare/data/MTBLS315/\ isaac_feature_tables/uhplc_pos/rf_roc_{trees}trees_{cv}cviter.pdf'''.format(trees=rf_estimators, cv=n_iter) print cross_val_rf.n_iter print cross_val_rf.test_size tpr_vals, auc_vals, mean_fpr = roc_curve_cv(X_pqn, y, clf_rf, cross_val_rf, path=rf_graph_path, save=False) # For adaboosted n_iter = 3 test_size = 0.3 random_state = 1 adaboost_estimators = 200 adaboost_path = '''/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/revolutionizing_healthcare/data/MTBLS315/\ 
isaac_feature_tables/uhplc_pos/adaboost_roc_{trees}trees_{cv}cviter.pdf'''.format(trees=adaboost_estimators, cv=n_iter) cross_val_adaboost = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, random_state=random_state) clf = AdaBoostClassifier(n_estimators=adaboost_estimators, random_state=random_state) adaboost_tpr, adaboost_auc, adaboost_fpr = roc_curve_cv(X_pqn, y, clf, cross_val_adaboost, path=adaboost_path) """ Explanation: <h2> RF & adaBoost with PQN-normalized, log-transformed data </h2> Turns out a monotonic transformation doesn't really affect any of these things. I guess they're already close to unit varinace...? End of explanation """ # Make a null model AUC curve def make_null_model(X, y, clf, cross_val, random_state=1, num_shuffles=5, plot=True): ''' Runs the true model, then sanity-checks by: Shuffles class labels and then builds cross-validated ROC curves from them. Compares true AUC vs. shuffled auc by t-test (assumes normality of AUC curve) ''' null_aucs = [] print y.shape print X.shape tpr_true, auc_true, fpr_true = roc_curve_cv(X, y, clf, cross_val, save=True) # shuffle y lots of times for i in range(0, num_shuffles): #Iterate through the shuffled y vals and repeat with appropriate params # Retain the auc vals for final plotting of distribution y_shuffle = shuffle(y) cross_val.y = y_shuffle cross_val.y_indices = y_shuffle print 'Number of differences b/t original and shuffle: %s' % (y == cross_val.y).sum() # Get auc values for number of iterations tpr, auc, fpr = roc_curve_cv(X, y_shuffle, clf, cross_val, plot=False, ) null_aucs.append(auc) #plot the outcome if plot: flattened_aucs = [j for i in null_aucs for j in i] my_dict = {'true_auc': auc_true, 'null_auc': flattened_aucs} df_poop = pd.DataFrame.from_dict(my_dict, orient='index').T df_tidy = pd.melt(df_poop, value_vars=['true_auc', 'null_auc'], value_name='auc', var_name='AUC_type') #print flattened_aucs sns.violinplot(x='AUC_type', y='auc', inner='points', data=df_tidy) # Plot 
distribution of AUC vals plt.title("Classification is not possible when data is shuffled") #sns.plt.ylabel('count') plt.xlabel('True model vs. Null Model') plt.ylabel('AUC') #sns.plt.plot(auc_true, 0, color='red', markersize=10) plt.savefig('/home/irockafe/Desktop/auc distribution') plt.show() # Do a quick t-test to see if odds of randomly getting an AUC that good return auc_true, null_aucs # Make a null model AUC curve & compare it to null-model # Random forest magic! rf_estimators = 1000 n_iter = 50 test_size = 0.3 random_state = 1 cross_val_rf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size, random_state=random_state) clf_rf = RandomForestClassifier(n_estimators=rf_estimators, random_state=random_state) true_auc, all_aucs = make_null_model(X_pqn, y, clf_rf, cross_val_rf, num_shuffles=5) # make dataframe from true and false aucs flattened_aucs = [j for i in all_aucs for j in i] my_dict = {'true_auc': true_auc, 'null_auc': flattened_aucs} df_poop = pd.DataFrame.from_dict(my_dict, orient='index').T df_tidy = pd.melt(df_poop, value_vars=['true_auc', 'null_auc'], value_name='auc', var_name='AUC_type') print df_tidy.head() #print flattened_aucs sns.violinplot(x='AUC_type', y='auc', inner='points', data=df_tidy, bw=0.7) # Plot distribution of AUC vals plt.title("Classification is not possible when data is shuffled") #sns.plt.ylabel('count') plt.xlabel('True model vs. Null Model') plt.ylabel('AUC') #sns.plt.plot(auc_true, 0, color='red', markersize=10) plt.savefig('/home/irockafe/Desktop/auc distribution', format='pdf') plt.show() """ Explanation: <h2> Great, you can classify things. 
But make null models and do a sanity check to make sure you arent just classifying garbage </h2> End of explanation """ from sklearn.decomposition import PCA # Check PCA of things def PCA_plot(X, y, n_components, plot_color, class_nums, class_names, title='PCA'): pca = PCA(n_components=n_components) X_pca = pca.fit(X).transform(X) print zip(plot_color, class_nums, class_names) for color, i, target_name in zip(plot_color, class_nums, class_names): # plot one class at a time, first plot all classes y == 0 #print color #print y == i xvals = X_pca[y == i, 0] print xvals.shape yvals = X_pca[y == i, 1] plt.scatter(xvals, yvals, color=color, alpha=0.8, label=target_name) plt.legend(bbox_to_anchor=(1.01,1), loc='upper left', shadow=False)#, scatterpoints=1) plt.title('PCA of Malaria data') plt.show() PCA_plot(X_pqn, y, 2, ['red', 'blue'], [0,1], ['malaria', 'non-malaria fever']) PCA_plot(X, y, 2, ['red', 'blue'], [0,1], ['malaria', 'non-malaria fever']) """ Explanation: <h2> Let's check out some PCA plots </h2> End of explanation """ # convert classes to numbers le = preprocessing.LabelEncoder() le.fit(class_map_df['class']) y_three_class = le.transform(class_map_df['class']) print class_map_df.head(10) print y_three_class print X.shape print y_three_class.shape y_labels = np.sort(class_map_df['class'].unique()) print y_labels colors = ['green', 'red', 'blue'] print np.unique(y_three_class) PCA_plot(X_pqn, y_three_class, 2, colors, np.unique(y_three_class), y_labels) PCA_plot(X, y_three_class, 2, colors, np.unique(y_three_class), y_labels) """ Explanation: <h2> What about with all thre classes? </h2> End of explanation """
yala/introdeeplearning
draft/rnn.ipynb
mit
import tensorflow as tf import cPickle as pickle from collections import defaultdict import re, random import numpy as np from sklearn.feature_extraction.text import CountVectorizer #Read data and do preprocessing def read_data(fn): with open(fn) as f: data = pickle.load(f) #Clean the text new_data = [] pattern = re.compile('[\W_]+') for text,label in data: text = text.strip("\r\n ").split() x = [] for word in text: word = pattern.sub('', word) word = word.lower() if 0 < len(word) < 20: x.append(word) new_data.append((' '.join(x),label)) return new_data train = read_data("data/train.p") print train[0:10] """ Explanation: RNN Sentiment Classifier In the previous lab, you built a tweet sentiment classifier based on Bag-Of-Words features. Now we ask you to improve this model by representing it as a sequence of words. Step 1: Input Preprocessing Run read_data() below to read training data, normalizing the text. End of explanation """ train_x, train_y = zip(*train) vectorizer = CountVectorizer(train_x, min_df=0.001) vectorizer.fit(train_x) vocab = vectorizer.vocabulary_ UNK_ID = len(vocab) PAD_ID = len(vocab) + 1 word2id = lambda w:vocab[w] if w in vocab else UNK_ID train_x = [[word2id(w) for w in x.split()] for x in train_x] train_data = zip(train_x, train_y) print train_data[0:10] """ Explanation: Step 2: Build a Vocabulary Here we will use sklearn's CountVectorizer to automatically build a vocabulary over the training set. Infrequent words are pruned to make our life easier. Here we have two special tokens: UNK_ID for unknown words and PAD_ID for special token &lt;PAD&gt; that is used to pad sentences to the same length. 
End of explanation """ import math #build RNN model batch_size = 20 hidden_size = 100 vocab_size = len(vocab) + 2 def lookup_table(input_, vocab_size, output_size, name): with tf.variable_scope(name): embedding = tf.get_variable("embedding", [vocab_size, output_size], tf.float32, tf.random_normal_initializer(stddev=1.0 / math.sqrt(output_size))) return tf.nn.embedding_lookup(embedding, input_) def linear(input_, output_size, name, init_bias=0.0): shape = input_.get_shape().as_list() with tf.variable_scope(name): W = tf.get_variable("Matrix", [shape[-1], output_size], tf.float32, tf.random_normal_initializer(stddev=1.0 / math.sqrt(shape[-1]))) if init_bias is None: return tf.matmul(input_, W) with tf.variable_scope(name): b = tf.get_variable("bias", [output_size], initializer=tf.constant_initializer(init_bias)) return tf.matmul(input_, W) + b session = tf.Session() tweets = tf.placeholder(tf.int32, [batch_size, None]) labels = tf.placeholder(tf.float32, [batch_size]) embedding = lookup_table(tweets, vocab_size, hidden_size, name="word_embedding") lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size) init_state = lstm_cell.zero_state(batch_size, tf.float32) _, final_state = tf.nn.dynamic_rnn(lstm_cell, embedding, initial_state=init_state) sentiment = linear(final_state[1], 1, name="output") sentiment = tf.squeeze(sentiment, [1]) loss = tf.nn.sigmoid_cross_entropy_with_logits(sentiment, labels) loss = tf.reduce_mean(loss) prediction = tf.to_float(tf.greater_equal(sentiment, 0.5)) pred_err = tf.to_float(tf.not_equal(prediction, labels)) pred_err = tf.reduce_sum(pred_err) optimizer = tf.train.AdamOptimizer().minimize(loss) tf.global_variables_initializer().run(session=session) saver = tf.train.Saver() random.shuffle(train_data) err_rate = 0.0 for step in xrange(0, len(train_data), batch_size): batch = train_data[step:step+batch_size] batch_x, batch_y = zip(*batch) batch_x = list(batch_x) if len(batch_x) != batch_size: continue max_len = max([len(x) for x in batch_x]) 
for i in xrange(batch_size): len_x = len(batch_x[i]) batch_x[i] = [PAD_ID] * (max_len - len_x) + batch_x[i] batch_x = np.array(batch_x, dtype=np.int32) batch_y = np.array(batch_y, dtype=np.float32) feed_map = {tweets:batch_x, labels:batch_y} _, batch_err = session.run([optimizer, pred_err], feed_dict=feed_map) err_rate += batch_err if step % 1000 == 0 and step > 0: print err_rate / step """ Explanation: Step 3: Build an LSTM Encoder A classifier requires the input feature vector to be of fixed size, while sentences are of different lengths. Thus, we need a model (called as encoder) to transform a sentence to a fixed size vector. This could be done by a recurrent neural net (RNN), by taking the last hidden state of LSTM encoder as the feature vector. We could then build a linear (or a multi-layer) network upon it to perform a classifier. Step 3.1 Embedding Lookup Layer At input layer, words are represented by their ID (one-hot vector). Before feeding words to LSTM cell, we need an embedding lookup layer to map words to their word vector, given their ID. You should write a function to perform this operation. def lookup_table(input_, vocab_size, output_size) where input_ is a matrix of sentences (sentences are padded to the same length in a batch), vocab_size is the size of vocabulary, output_size the size of word vector. You could use the tensorflow API function embedding-lookup Step 3.2 LSTM Layer Now we have the embedding layer, we can build LSTM layer upon it. It requires 4 steps: 1. Create a LSTM Cell using BasicLSTMCell 2. Let's say you have a lstm_cell object, declare initial state vector by calling lstm_cell.zero_state(). 3. Create a RNN Layer using dynamic-rnn, get the final state of it. Step 3.3 Classification Layer Now you have a fixed-size vector for sentences, build a classification layer the same as previous. Declare the cross-entropy loss function. Step 3.4 Training Now you need to feed the network with training data, and optimize it. 
Note that you have to pad sentences in the batch to the same length. To do this, you should put some PAD_ID tokens before each sentences so that they are in the same length. (Don't put them in the back because it would be harder to get the final hidden state of each sentence!) The Full Code End of explanation """
LSSTC-DSFP/LSSTC-DSFP-Sessions
Sessions/Session05/Day2/Introduction to Photometry-Solutions.ipynb
mit
import numpy as np import matplotlib.pyplot as plt import astropy.io.fits as fits ## make matplotlib appear in the notebook rather than in a new window %matplotlib inline """ Explanation: Introduction to Photometry - Solutions Dora Föhring, University of Hawaii Institute for Astronomy Aim: Demonstrate photometry on a series of bias and flat field corrected images of a Near Earth Asteroid. 0. Prerequisites End of explanation """ datadir = '' objname = '2016HO3' """ Explanation: 0.1 Directory Set up End of explanation """ def plotfits(imno): img = fits.open(datadir+objname+'_{0:02d}.fits'.format(imno))[0].data f = plt.figure(figsize=(10,12)) #im = plt.imshow(img, cmap='hot') im = plt.imshow(img[480:580, 460:600], cmap='hot') plt.clim(1800, 2800) plt.colorbar(im, fraction=0.034, pad=0.04) plt.savefig("figure{0}.png".format(imno)) plt.show() numb = 1 plotfits(numb) numb = 2 plotfits(numb) """ Explanation: 0.2 Display images End of explanation """ partimg = fits.open(datadir+objname+'_01.fits')[0].data[480:580, 460:600] """ Explanation: 1. 
Photometry set up Select part of the image for ease of display End of explanation """ targcen = np.array([22,42]) ## target center compcen = np.array([75,125]) ## comparison center """ Explanation: Define starting values End of explanation """ searchr = 6 ## search box size ap_r = 2 ## aperture radius sky_inner = 3 sky_outer = 5 """ Explanation: Aperture photometry set up End of explanation """ def cent_weight(n): """ Assigns centroid weights """ wghts=np.zeros((n),np.float) for i in range(n): wghts[i]=float(i-n/2)+0.5 return wghts def calc_CoM(psf, weights): """ Finds Center of Mass of image """ cent=np.zeros((2),np.float) temp=sum(sum(psf) - min(sum(psf) )) print(temp) cent[1]=sum(( sum(psf) - min(sum(psf)) ) * weights)/temp cent[0]=sum(( sum(psf.T) - min(sum(psf.T)) ) *weights)/temp return cent """ Explanation: 1.1 Centroiding: Center of Mass Calculate Center of Mass (CoM) defined as: $\bar{x} = \frac{\sum A_i x_i}{\sum A_i }$, $\bar{y} = \frac{\sum A_i y_i}{\sum A_i }$. End of explanation """ ## Cut a box between search limits, centered around targcen targbox = partimg[targcen[0]-searchr : targcen[0]+searchr, targcen[1]-searchr : targcen[1]+searchr] weights = cent_weight(len(targbox)) tcenoffset = calc_CoM(targbox, weights) print(tcenoffset) tcenter = targcen + tcenoffset plt.plot(sum(targbox)) plt.show() compbox = partimg[compcen[0]-searchr : compcen[0]+searchr, compcen[1]-searchr : compcen[1]+searchr] compw = cent_weight(len(compbox)) ccenoffset = calc_CoM(compbox,compw) ccenter = compcen + ccenoffset print(tcenter) """ Explanation: Use centroiding algorithm to find the actual centers of the targe and comparison. 
End of explanation """ def circle(npix, r1): """ Builds a circle """ pup=np.zeros((npix,npix),np.int) for i in range(npix): for j in range(npix): r=np.sqrt((float(i-npix/2)+0.5)**2+(float(j-npix/2)+0.5)**2) if r<=r1: pup[i,j]=1 return pup """ Explanation: 1.2 Aperture Photometry Science Aperture End of explanation """ def annulus(npix, r_inner,r_outer=-1.): """ Builds an annulus """ pup=np.zeros((npix,npix),np.int) for i in range(npix): for j in range(npix): r=np.sqrt((float(i-npix/2)+0.5)**2+(float(j-npix/2)+0.5)**2) if ((r<=r_outer)&(r>=r_inner)): pup[i,j]=1 return pup """ Explanation: Sky annulus End of explanation """ circmask = circle(searchr*2, ap_r) annmask = annulus(searchr*2, sky_inner, sky_outer) """ Explanation: Extract values from regions Create mask End of explanation """ newtarg = partimg[int(round(tcenter[0]))-searchr : int(round(tcenter[0]))+searchr, int(round(tcenter[1]))-searchr : int(round(tcenter[1]))+searchr] newcomp = partimg[int(round(ccenter[0]))-searchr : int(round(ccenter[0]))+searchr, int(round(ccenter[1]))-searchr : int(round(ccenter[1]))+searchr] """ Explanation: Define new regions where the target and comparison are centered. End of explanation """ targaper = newtarg * circmask compaper = newcomp * circmask """ Explanation: Place mask on region End of explanation """ targann = newtarg * annmask compann = newcomp * annmask """ Explanation: Place mask on sky annulus slice. End of explanation """ im = plt.imshow(partimg, cmap='hot') plt.clim(1800, 2800) plt.scatter(targcen[1], targcen[0], c='g', marker='x') plt.scatter(compcen[1], compcen[0], c='g', marker='x') plt.scatter(tcenter[1], tcenter[0], c='b', marker='x') plt.scatter(ccenter[1], ccenter[0], c='b', marker='x') plt.show() """ Explanation: 1.3 Tests a. 
Display image with target and comparison centers before and after centroiding End of explanation """ im = plt.imshow(targaper, cmap='hot') plt.clim(1800, 2800) plt.show() im = plt.imshow(targann, cmap='hot') plt.clim(1800, 2800) plt.show() """ Explanation: b. Disply image with aperture mask and sky annulus End of explanation """ def calcsnr(target, bg): signal = target - bg noise = np.sqrt(signal + bg) snr = signal / noise return snr, noise """ Explanation: 2. Photometry 2.1 Calculate SNR Calculate Signal-to-Noise Ratio. CCD noise = sqrt(signal + background + dark current + read noise) End of explanation """ targc = np.sum(targaper) / np.sum(circmask) targbg= np.sum(targann) / np.sum(annmask) compc = np.sum(compaper) / np.sum(circmask) compbg= np.sum(compann) / np.sum(annmask) snr, noise = calcsnr(targc, targbg) print(snr) snr, noise = calcsnr(compc, compbg) print(snr) """ Explanation: Sum all flux inside target and comparison apertures and divide by number of pixels to get average count per pixel. 
End of explanation """ #Try a range of aperture sizes apertures = np.arange(1, 10, 1) snrold = 0 for aper in apertures: apermask = circle(searchr*2, aper) targc = np.sum(apermask*newtarg) / np.sum(apermask) snr, noise = calcsnr(targc, targbg) if snr > snrold: bestaper = aper snrold = snr snr, noise = calcsnr(targc, targbg) """ Explanation: 2.2 Optimize photometry aperture End of explanation """ targc = circle(searchr*2, ap_r)*newtarg targskyc = annulus(searchr*2, sky_inner, sky_outer)*newtarg compc = circle(searchr*2, ap_r)*newcomp compskyc = annulus(searchr*2, sky_inner, sky_outer)*newcomp ratio = np.sum(compc)/np.sum(targc) sigmaratio = ratio*np.sqrt((np.sum(targc)/np.sum(targskyc))**2 + (np.sum(compc)/np.sum(compc))**2) deltamag = -2.5*np.log10(ratio) sigmamag = 2.5*sigmaratio/(ratio*np.log(10)) refmag = 19.4 mag = refmag - deltamag print("Measured Magnitude = {:0.3f} ± {:0.3f}".format(mag, sigmamag)) """ Explanation: 2.3 Calculate the target's magnitude End of explanation """
ngovindaraj/Udacity_Projects
Data_Analysis/Data_Analysis.ipynb
mit
# Third-party modules: pandas/numpy for data handling, scipy.stats for the
# hypothesis tests, matplotlib/seaborn for plotting.
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
# IPython magic: valid only inside a notebook cell, not in plain Python.
%pylab inline
# Load the Kaggle Titanic passenger data (891 rows expected).
titanic_data = pd.read_csv('./titanic_data.csv')
titanic_data.head()
# Checking data types by column
titanic_data.dtypes
# Checking for duplicate entries
duplicates = titanic_data.duplicated().sum()
# NOTE(review): Python 2 print statement -- would need print() under Python 3.
print 'Duplicate Entries = ', duplicates
# Removing variables that are not relevant to the analysis
titanic_clean = titanic_data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis = 1)
titanic_clean.head()
# Checking for missing values
titanic_clean.isnull().sum()
# Group those with missing age based on Sex
titanicNullAge = titanic_clean[titanic_clean['Age'].isnull()]
titanicNullAge.groupby('Sex').size()
""" Explanation: Author: Navina Govindaraj Date: April 2017 Investigating a Dataset The Titanic dataset has been chosen for this project. It contains demographics and passenger information from 891 of the 2224 passengers and crew on board the Titanic. Source: Kaggle Research Questions 1) Did survival differ by age and gender? 2) Did class play a role in survival? Data Wrangling End of explanation """
titanic_clean.isnull().sum()
# Find mean age for each group (based on Sex)
mean_age = titanic_clean.groupby("Sex")["Age"].mean()
mean_age
# Populating NA with mean ages for "male" and "female"
# (groupby + transform broadcasts each group's mean back onto its rows)
titanic_clean["Age"].fillna(titanic_clean.groupby("Sex")["Age"]. transform("mean"), inplace=True)
# Checking if ["Age"] has been populated with the mean values
if titanic_clean.isnull()['Age'].sum() != 0:
    print("Fill all entities with NA Age failed!")
# Checking if the mean remains unchanged after populating missing ages
# NOTE(review): this redisplays the mean computed *before* filling; it does
# not recompute it from the filled column.
mean_age
""" Explanation: To answer the research questions, 'Age' is the only variable that needs to be dealt with. The 177 missing values will be filled in with the mean age for sex ="male" and sex = "female" separately.
Rows with missing ages are not being dropped from the analysis, since this constitute 20% of the dataset and losing this data would interfere with the results. End of explanation """ titanic_clean.describe() """ Explanation: Data Exploration and Visualization End of explanation """ f, axs = plt.subplots(figsize=(18, 5), ncols = 3) sns.set_palette("Set2") # Fig 1 - Survival Distribution sns.countplot(x="Survived", data=titanic_clean, alpha=.65, ax=axs[0]).set_title("Fig 1: Survival Distribution, (1 = Survived)") # Fig 2 - Survival by Age sns.boxplot(x="Survived", y="Age",data=titanic_clean, ax=axs[1]).set_title("Fig 2: Survival by Age, (1 = Survived)") # Fig 3 - Survival by Gender sns.countplot(y="Survived", hue="Sex", palette={"male":"m","female":"orange"}, data=titanic_clean, alpha=.55, ax=axs[2]).set_title("Fig 3: Survival by Gender, 1 = Survived") """ Explanation: The table above gives an overview of the dataset. Here are some important points to note. - There are 891 records of data in total - The mean age of people onboard was around 30 years - The average person seems to have traveled second class (mean= 2.3) - The middle 50% of the passengers were between the age group of 22 to 35 years Looking at the survival data graphically: End of explanation """ g = sns.factorplot(x="Sex", hue = "Survived", col="Pclass", data=titanic_clean, kind="count", size=4, aspect=1, alpha=.65) """ Explanation: From the above visualizations, only less than half of the 891 passengers survived the sinking of the Titanic. The age distributions for both groups have means that are very close. Despite the number of women passengers being much lower than men, in terms of survival, women survivors are more than men. This shows signs that being a woman increased the chance of survival. 
Drilling down into survival by class : End of explanation """
# One count panel per passenger class, split by sex and coloured by survival.
g = sns.factorplot(x="Sex", hue = "Survived", col="Pclass", data=titanic_clean, kind="count", size=4, aspect=1, alpha=.65)
""" Explanation: This shows that those who traveled third class were much more (primarily male passengers) than those traveled by the second and first classes respectively Among the women who survived, the highest number of them traveled first class. This indicates that the combination of being a woman, and traveling first class may have increased one's chance of survival. To test the significance between these variables, statistical hypothesis tests have been used. Statistical Inference and Hypothesis Testing Chi-Squared Test for Independence: 'Pclass' vs. 'Survived' $H_0$: Survival is NOT dependent of the travel class of the passenger $H_a$: Survival IS dependent on the travel class of the passenger End of explanation """
# Chi-squared test
def chi_squared_test(col1, col2, isPrint=False):
    """Chi-squared test of independence between two categorical columns.

    col1, col2 : pandas Series; a contingency table is built with
        pd.crosstab and passed to scipy.stats.chi2_contingency.
    isPrint : when True, print the table, statistic, p-value, degrees of
        freedom and expected frequencies.

    Returns (contingency_table, chi, p, dof, expected).

    NOTE(review): chi2_contingency applies Yates' continuity correction by
    default for 2x2 tables (e.g. Sex vs. Survived) -- confirm intended.
    """
    contingency_table = pd.crosstab(col1, col2)
    chi, p, dof, expected = stats.chi2_contingency(contingency_table)
    if(isPrint):
        print(contingency_table)
        # NOTE(review): Python 2 print statements below -- need print() on Py3.
        print "Chi square: ", chi
        print "p-value: ", np.round(p, decimals=4)
        print "Degrees of freedom: ", dof
        print "\nExpected frequency:\n", expected
    return contingency_table, chi, p, dof, expected
#Pclass vs. Survived (Chi-squared test)
contingency_table, chi, p, dof, expected = chi_squared_test(titanic_clean["Pclass"], titanic_clean["Survived"], isPrint=True)
""" Explanation: We reject the null hypothesis since p-value < α level of 0.05 Thus it can be concluded that survival is dependent on the travel class of the passenger Chi-Squared Test for Independence: 'Sex' vs.
'Survived' $H_0$: Survival is NOT dependent on the passenger's sex $H_a$: Survival IS dependent on the passenger's sex End of explanation """ # Visualizing both the distributions h = sns.FacetGrid(titanic_clean, col="Survived", hue="Survived").map(sns.distplot, "Age") h.set_axis_labels("Age", "KDE") # Mean age of passengers who died vs. survived (survived=1) print "Variance:" print titanic_clean.groupby("Survived")["Age"].var() print ('\n') print "Number of passengers:" print titanic_clean.groupby("Survived")["Age"].size() """ Explanation: We reject the null hypothesis since p-value < α level of 0.05 Thus it can be concluded that survival is dependent on the gender of the passenger Independent two-sample t-test: 'Age' vs. 'Survived' $H_0 : \mu_0 = \mu_1 $ There is no difference in mean age between the survivors and the victims $H_a : \mu_0 \neq \mu_1 $ The mean age of survivors is signifcantly different than the mean age of victims End of explanation """ x = titanic_clean[titanic_clean["Survived"]==0]["Age"] y = titanic_clean[titanic_clean["Survived"]==1]["Age"] stats.ttest_ind(x, y, equal_var=False) """ Explanation: Assumptions for Welch's t-test: - Both distributions are normally distributed - The variances for the survivors and victims are unequal - The sample sizes in both groups are unequal End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/cas/cmip6/models/fgoals-f3-l/toplevel.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'cas', 'fgoals-f3-l', 'toplevel') """ Explanation: ES-DOC CMIP6 Model Properties - Toplevel MIP Era: CMIP6 Institute: CAS Source ID: FGOALS-F3-L Sub-Topics: Radiative Forcings. Properties: 85 (42 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:53:44 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Flux Correction 3. Key Properties --&gt; Genealogy 4. Key Properties --&gt; Software Properties 5. Key Properties --&gt; Coupling 6. Key Properties --&gt; Tuning Applied 7. Key Properties --&gt; Conservation --&gt; Heat 8. Key Properties --&gt; Conservation --&gt; Fresh Water 9. Key Properties --&gt; Conservation --&gt; Salt 10. Key Properties --&gt; Conservation --&gt; Momentum 11. Radiative Forcings 12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2 13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4 14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O 15. 
Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3 16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3 17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC 18. Radiative Forcings --&gt; Aerosols --&gt; SO4 19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon 20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon 21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate 22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect 23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect 24. Radiative Forcings --&gt; Aerosols --&gt; Dust 25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic 26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic 27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt 28. Radiative Forcings --&gt; Other --&gt; Land Use 29. Radiative Forcings --&gt; Other --&gt; Solar 1. Key Properties Key properties of the model 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Top level overview of coupled model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of coupled model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Flux Correction Flux correction properties of the model 2.1. Details Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how flux corrections are applied in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Genealogy Genealogy and history of the model 3.1. Year Released Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Year the model was released End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.2. CMIP3 Parent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 CMIP3 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. CMIP5 Parent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 CMIP5 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.4. Previous Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Previously known as End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Software Properties Software properties of model 4.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.4. Components Structure Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how model realms are structured into independent software components (coupled via a coupler) and internal software components. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OASIS" # "OASIS3-MCT" # "ESMF" # "NUOPC" # "Bespoke" # "Unknown" # "None" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 4.5. Coupler Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Overarching coupling framework for model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Coupling ** 5.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of coupling in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.2. Atmosphere Double Flux Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Atmosphere grid" # "Ocean grid" # "Specific coupler grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 5.3. Atmosphere Fluxes Calculation Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Where are the air-sea fluxes calculated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.4. Atmosphere Relative Winds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Tuning Applied Tuning methodology for model 6.1. 
Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics/diagnostics of the global mean state used in tuning model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics/diagnostics used in tuning model/component (such as 20th century) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.5. Energy Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.6. Fresh Water Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Conservation --&gt; Heat Global heat convervation properties of the model 7.1. Global Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how heat is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Atmos Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Atmos Land Interface Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how heat is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.4. Atmos Sea-ice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.5. Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.6. Land Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the land/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. 
Key Properties --&gt; Conservation --&gt; Fresh Water Global fresh water convervation properties of the model 8.1. Global Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how fresh_water is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Atmos Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh_water is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Atmos Land Interface Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how fresh water is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.4. Atmos Sea-ice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. 
Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh water is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.6. Runoff Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how runoff is distributed and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.7. Iceberg Calving Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how iceberg calving is modeled and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.8. Endoreic Basins Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how endoreic basins (no ocean access) are treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.9. Snow Accumulation Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how snow accumulation over land and over sea-ice is treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Key Properties --&gt; Conservation --&gt; Salt Global salt convervation properties of the model 9.1. Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how salt is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10. Key Properties --&gt; Conservation --&gt; Momentum Global momentum convervation properties of the model 10.1. Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how momentum is conserved in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11. Radiative Forcings Radiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5) 11.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of radiative forcings (GHG and aerosols) implementation in model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2 Carbon dioxide forcing 12.1. 
Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4 Methane forcing 13.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O Nitrous oxide forcing 14.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15. Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3 Troposheric ozone forcing 15.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3 Stratospheric ozone forcing 16.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC Ozone-depleting and non-ozone-depleting fluorinated gases forcing 17.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "Option 1" # "Option 2" # "Option 3" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.2. Equivalence Concentration Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Details of any equivalence concentrations used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Radiative Forcings --&gt; Aerosols --&gt; SO4 SO4 aerosol forcing 18.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon Black carbon aerosol forcing 19.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon Organic carbon aerosol forcing 20.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate Nitrate forcing 21.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 21.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect Cloud albedo effect forcing (RFaci) 22.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22.2. Aerosol Effect On Ice Clouds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect Cloud lifetime effect forcing (ERFaci) 23.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.2. 
Aerosol Effect On Ice Clouds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.3. RFaci From Sulfate Only Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative forcing from aerosol cloud interactions from sulfate aerosol only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24. Radiative Forcings --&gt; Aerosols --&gt; Dust Dust forcing 24.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 24.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic Tropospheric volcanic forcing 25.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 25.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic Stratospheric volcanic forcing 26.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt Sea salt forcing 27.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 28. Radiative Forcings --&gt; Other --&gt; Land Use Land use forcing 28.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 28.2. Crop Change Only Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Land use change represented via crop change only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "irradiance" # "proton" # "electron" # "cosmic ray" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 29. Radiative Forcings --&gt; Other --&gt; Solar Solar forcing 29.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How solar forcing is provided End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 29.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. 
citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """
mediagit2016/workcamp-maschinelles-lernen-grundlagen
01-grundlagen/pandas.ipynb
gpl-3.0
# Import der Bibliotheken import pandas as pd # Extra packages import numpy as np import matplotlib.pyplot as plt # für die grafische Darstellung import seaborn as sns # für grafische Darstellung. Muss vorher evtl. installiert werden # jupyter notebook magic Befehl %matplotlib inline plt.rcParams['figure.figsize'] = (10,6) # größere Darstellung # im /data Ordner sollten apple.csv boeing.csv googl.csv microsoft.csv nike.csv liegen !ls ./data """ Explanation: Einführung in Pandas mit Aktiendaten und Beispielen zur Korrelation Author list: Alexander Fred-Ojala & Ikhlaq Sidh & Ramon Rank References / Sources: Includes examples from Wes McKinney and the 10min intro to Pandas License Agreement: Feel free to do whatever you want with this code What Does Pandas Do? <img src="https://github.com/mediagit2016/workcamp-maschinelles-lernen-grundlagen/raw/master/01-grundlagen/pandas-10.JPG"> What is a Pandas Table Object? <img src="https://github.com/mediagit2016/workcamp-maschinelles-lernen-grundlagen/raw/master/01-grundlagen/pandas-20.JPG"> Import Bibliotheken End of explanation """ # Wir probieren es mit einem array aus np.random.seed(0) # setze seed für Nachvollziebarkeit a1 = np.array(np.random.randn(3)) a2 = np.array(np.random.randn(3)) a3 = np.array(np.random.randn(3)) print (a1) print (a2) print (a3) # Wir erzeugen einen ersten Dataframe mit einem np.array - der Dataframe hat nur eine Spalte df0 = pd.DataFrame(a1) print(type(df0)) df0 # DataFrame aus einer Liste der np.arrays df0 = pd.DataFrame([a1, a2, a3]) df0 # beachte, dass keine Spalten Labels vorhanden sind, nur ganzzahlige Werte, # und der Index wird automatisch gesetzt # DataFrame von einem 2D np.array # 9 Normalverteilte Zufallszahlen np.random.randn() # mit reshape als 3x3 Matrix ax = np.random.randn(9).reshape(3,3) ax # DataFrame von einem 2D np.array # 24 Normalverteilte Zufallszahlen np.random.randn() # mit reshape als 3x8 Matrix at = np.random.randn(24).reshape(3,8) at # Setzen der Spalten Labels in ax df0 
= pd.DataFrame(ax,columns=['rand_normal_1','Random Again','Third'], index=[100,200,99]) # wir können Spalten Bezeichnungen und einen Index zuweisen, dies muss aber in der Größe übereinstimmen #Ausgabe von df0 df0 # Setzen der Spalten Labels in ax df_0 = pd.DataFrame(at,columns=['rand_1','rand_2','rand_3', 'rand_4', 'rand_5','rand_6', 'rand_7', 'rand_8'], index=[0,1,2]) # wir können Spalten Bezeichnungen und einen Index zuweisen, dies muss aber in der Größe übereinstimmen # Ausgabe von df_0 df_0 # DataFrame aus einem Dictionary # arrays a1 und a2 dict1 = {'A':a1, 'B':a2} df1 = pd.DataFrame(dict1) #Ausgabe df1 df1 # Man kann leicht eine weitere Spalte hinzufügen (so wie man Werte in ein dictionary hinzufügt) df1['C']=a3 # Ausgabe df1 df1 # Wir können eine zusätzliche Spalte mit Text und Zahlen hinzufügen df1['L'] = ["Etwas", 3.2, "Worte"] df1 """ Explanation: Teil 1 Einfache Erzeugung und Veränderung von Panda Objekten Key Points: Pandas has two / three main data types: * Series (similar to numpy arrays, but with index) * DataFrames (table or spreadsheet with Series in the columns) * Panels (3D version of DataFrame, not as common) Es ist einfach einen DataFrame zu erzeugen Wir verwenden pd.DataFrame(**inputs**) und können jeden Datentyp als Argument verwenden Function: pd.DataFrame(data=None, index=None, columns=None, dtype=None, copy=False) Input data can be a numpy ndarray (structured or homogeneous), dict, or DataFrame. Dict can contain Series, arrays, constants, or list-like objects as the values. 
End of explanation """ print(df1[['L','A']]) print(type(df1['L'])) df1 # Spalten umbenennen df1 = df1.rename(columns = {'L':'Umbenannt'}) df1 # Löschen von Spalten del df1['C'] df1 # oder drop Spalten df1.drop('A',axis=1,inplace=True) # does not change df1 if we don't set inplace=True df1 df1 # oder drop Zeilen df1.drop(0) # Beispiel: Anzeige einer Spalte df1['B'] # Anzeigen verschiedener Spalten df1[['B','Umbenannt']] """ Explanation: Pandas Series Objekt Wie einnp.array, aber es können Datentypen kombiniert werden und eine Series hat einen eigenen Index Anmerkung: Jede Spalte in einem Dataframe ist eine Series End of explanation """ print (df1[0:2]) # ok df1 df1.iloc[1,1] df1 """ Explanation: Slicing <a href="https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html">[10 Minutes]</a> In the 10 min Pandas Guide, you will see many ways to view, slice a dataframe view/slice by rows, eg df[1:3], etc. view by index location, see df.iloc (iloc) view by ranges of labels, ie index label 2 to 5, or dates feb 3 to feb 25, see df.loc (loc) view a single row by the index df.xs (xs) or df.ix (ix) filtering rows that have certain conditions add column add row How to change the index and more... End of explanation """ !ls data/ # Wir können Daten aus dem Web herunterladen. Hierfür nutzen wir pd.read_csv # Ein CSV file ist ein comma seperated file base_url = 'https://google.com/finance?output=csv&q=' dfg = pd.read_csv('data/googl.csv').drop('Unnamed: 0',axis=1) # Google Aktien dfa = pd.read_csv('data/apple.csv').drop('Unnamed: 0',axis=1) #Apple Aktien dfg dfg.head() # zeige die ersten fünf Werte an dfg.tail(3) # zeige die letzten 3 Werte an dfg.columns # returns columns, can be used to loop over dfg.index # return """ Explanation: Teil 2 Beispiel Finance: Große Data Frames Wir laden Daten im CSV Format. 
See https://www.quantshare.com/sa-43-10-ways-to-download-historical-stock-quotes-data-for-free End of explanation """ dfg['Date'][0] type(dfg['Date'][0]) # Date wird als String gelistet, sollte deshalb in datetime umgewandelt werden dfg.index = pd.to_datetime(dfg['Date']) # setzen des neuen index dfg.drop(['Date'],axis=1,inplace=True) dfg.head() print(type(dfg.index[0])) dfg.index[0] dfg.index dfg['2017-08':'2017-06'] """ Explanation: Umwandeln des Index in pandas datetime Objekt End of explanation """ dfg.shape # 251 business days last year dfg.columns dfg.size # Generelle statistische Daten mit describe() dfg.describe() # Boolean indexing dfg['Open'][dfg['Open']>1130] # check what dates the opening # Check where Open, High, Low and Close where greater than 1130 dfg[dfg>1000].drop('Volume',axis=1) # If you want the values in an np array dfg.values """ Explanation: Attributes & general statistics of a Pandas DataFrame End of explanation """ # Getting a cross section with .loc - BY VALUES of the index and columns # df.loc[a:b, x:y], by rows and column location # Note: You have to know indices and columns dfg.loc['2017-08-31':'2017-08-21','Open':'Low'] """ Explanation: .loc() End of explanation """ # .iloc slicing at specific location - BY POSITION in the table # Recall: # dfg[a:b] by rows # dfg[[col]] or df[[col1, col2]] by columns # df.loc[a:b, x:y], by index and column values + location # df.iloc[3:5,0:2], numeric position in table dfg.iloc[1:4,3:5] # 2nd to 4th row, 4th to 5th column """ Explanation: .iloc() End of explanation """ # We can change the index sorting dfg.sort_index(axis=0, ascending=True).head() # starts a year ago # sort by value dfg.sort_values(by='Open')[0:10] """ Explanation: More Basic Statistics End of explanation """ dfg[dfg>1115].head(10) # we can also drop all NaN values dfg[dfg>1115].head(10).dropna() dfg2 = dfg # make a copy and not a view dfg2 is dfg """ Explanation: Boolean End of explanation """ # Recall dfg.head(4) # All the ways to 
view # can also be used to set values # good for data normalization dfg['Volume'] = dfg['Volume']/100000.0 dfg.head(4) """ Explanation: Setting Values End of explanation """ # mean by column, also try var() for variance dfg.mean() # Verainz für jede Spalte dfg.var() dfg[0:5].mean(axis = 1) # Mittelwert der Zeilen für die ersten 5 Zeilen """ Explanation: More Statistics and Operations End of explanation """ # Reload dfg = pd.read_csv('data/googl.csv').drop('Unnamed: 0',axis=1) # Google stock data dfa = pd.read_csv('data/apple.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfm = pd.read_csv('data/microsoft.csv').drop('Unnamed: 0',axis=1) # Google stock data dfn = pd.read_csv('data/nike.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfb = pd.read_csv('data/boeing.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfb.head() # Rename columns dfg = dfg.rename(columns = {'Close':'GOOG'}) #print (dfg.head()) dfa = dfa.rename(columns = {'Close':'AAPL'}) #print (dfa.head()) dfm = dfm.rename(columns = {'Close':'MSFT'}) #print (dfm.head()) dfn = dfn.rename(columns = {'Close':'NKE'}) #print (dfn.head()) dfb = dfb.rename(columns = {'Close':'BA'}) dfb.head(2) # Lets merge some tables # They will all merge on the common column Date df = dfg[['Date','GOOG']].merge(dfa[['Date','AAPL']]) df = df.merge(dfm[['Date','MSFT']]) df = df.merge(dfn[['Date','NKE']]) df = df.merge(dfb[['Date','BA']]) df.head() df['Date'] = pd.to_datetime(df['Date']) df = df.set_index('Date') df.head() df.plot() df['2017'][['NKE','BA']].plot() # show a correlation matrix (pearson) crl = df.corr() crl crl.sort_values(by='GOOG',ascending=False) s = crl.unstack() so = s.sort_values(ascending=False) so[so<1] df.mean() sim=df-df.mean() sim.tail() sim[['MSFT','BA']].plot() """ Explanation: PlotCorrelation Load several stocks End of explanation """
edhenry/notebooks
Sequential and Binary Search in Python.ipynb
mit
# Finding a single integer in an array of integers using Python's `in` # operator 15 in [3,5,6,9,12,11] """ Explanation: This notebook will include examples of searching and sorting algorithms implemented in python. It is both for my own learning, and for anyone else who would like to use this notebook for anything they'd like. Searching Finding an item in a collection of items is a pretty typical search problem. Depending on the implementation, a search will tend to return a True or False boolean answer to the question of "is this item contained within this collection of items?". An example of this can be seen below, using Pythons in operator. End of explanation """ # Finding a single integer in an array of integers using Python's `in` # operator 11 in [3,5,6,9,12,11] """ Explanation: We can see this returns a boolean answer of False, indicating that the integer isn't present in the array. Below is another example where the answer is True. End of explanation """ # Search sequentially through a list, incrementing the position counter # if is_present is not True, otherwise set is_present to True and return def sequential_search(li, item): position = 0 is_present = False while position < len(li) and not is_present: if li[position] == item: is_present = True else: position = position + 1 return is_present test_array = [1,31,5,18,7,10,25] print(sequential_search(test_array, 2)) print(sequential_search(test_array, 25)) """ Explanation: Python provides useful abstractions like this for a lot of search and sort functionality, but it's important to understand what's going on 'under the hood' of these functions. Sequential Search Unordered array Datum, in arrays such as the ones used in the examples above, are typically stores in a collection such as a list. These datum within these lists have linear, or sequential relationship. They are each stores in a position within the array, relative to the other datum. 
# Sequential search over an ASCENDING sorted list. Because the list is
# ordered, we can stop early as soon as we pass an element greater than the
# target: the target can no longer appear further on.
def ordered_sequential_search(li, item):
    """Return True if `item` is present in the ascending sorted list `li`.

    Scans left to right; stops early once an element larger than `item`
    is seen, which is safe only because `li` is sorted low-to-high.
    """
    position = 0
    found = False
    stop = False
    while position < len(li) and not found and not stop:
        if li[position] == item:
            # BUG FIX: the original wrote `found == True` — a comparison whose
            # result was discarded — so the function always returned False,
            # even when the item was present. Assignment is intended.
            found = True
        else:
            if li[position] > item:
                # We have passed where `item` would have to be; give up early.
                stop = True
            else:
                position = (position + 1)
    return found

test_li = [0,2,3,4,5,6,7,12,15,18,23,27,45]
print(ordered_sequential_search(test_li, 25))
# Binary search example: classic iterative halving of the candidate range
# [lo, hi] over an ascending sorted list, until the target is found or the
# range becomes empty.
def binary_search(li, item):
    """Return True if `item` is present in the ascending sorted list `li`."""
    lo = 0
    hi = len(li) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if li[mid] == item:
            return True
        if li[mid] < item:
            # Target, if present, lies strictly to the right of mid.
            lo = mid + 1
        else:
            # Target, if present, lies strictly to the left of mid.
            hi = mid - 1
    return False

test_li = [0,2,3,4,5,8,10,15,17,21,25,32,42,45]
print(binary_search(test_li, 45))
Binary Search With sequential search we start by evaluating the first entry of the array for whether or not it matches the item that we're looking for, and if it does not we proceed through the entire collection, trying to find a match. There are at most, at any time, $n-1$ more items to look at if the item we're currently evaluating is not the one we're looking for. Binary search takes a bit of a different approach to the problem. Instead of searching through the collection sequentially, starting with the first item in the list or array, the process starts at the middle. If the middle item of the list is not the item that we're looking for, and the item we're looking for is larger than the middle value, we can drop the entire bottom half of the list and save ourselves that much computation time. End of explanation """
hetaodie/hetaodie.github.io
assets/media/uda-ml/supervisedlearning/jc/为慈善机构寻找捐助者/.Trash-0/files/finding_donors-zh.ipynb
mit
# Import libraries necessary for this project import numpy as np import pandas as pd from time import time from IPython.display import display # Allows the use of display() for DataFrames # Import supplementary visualization code visuals.py import visuals as vs # Pretty display for notebooks %matplotlib inline # Load the Census dataset data = pd.read_csv("census.csv") # Success - Display the first record display(data.head(n=1)) """ Explanation: 机器学习工程师纳米学位 监督式学习 项目:为 CharityML 寻找捐赠者 欢迎来到机器学习工程师纳米学位的第二个项目!在此 notebook 中,我们已经为你提供了一些模板代码,你需要实现其他必要功能,以便成功地完成此项目。以实现开头的部分表示你必须为下面的代码块提供额外的功能。我们将在每部分提供说明,并在代码块中用 'TODO' 语句标记具体的实现要求。请务必仔细阅读说明! 除了实现代码之外,你必须回答一些问题,这些问题与项目和你的实现有关。每个部分需要回答的问题都在开头以问题 X 标记。请仔细阅读每个问题并在下面以答案:开头的文本框中提供详细的答案。我们将根据你的每个问题答案和所提供的实现代码评估你提交的项目。 注意: 在提交此 notebook 时,请注明你所使用的 PYTHON 版本。你可以使用键盘快捷键 Shift + Enter 执行代码和 Markdown 单元格。此外,可以通过双击进入编辑模式,编辑 Markdown 单元格。 开始 在此项目中,你将自己选择实现几个监督式算法,并使用从 1994 年美国人口普查数据中摘取的数据准确地对个人收入进行建模。然后,你将根据初步结果选择最佳候选算法,并进一步优化该算法,以便构建最佳模型。你的目标是构建一个准确预测公民收入是否超过 50,000 美元的模型。公益机构可能会面临此类任务,这些机构需要依赖捐赠。了解公民的收入可以帮助公益机构更好地判断应该请求多少捐赠款,或者是否有必要请求捐赠。虽然直接通过公开的数据判断个人的一般收入范围比较难,但是我们可以通过其他公开特征推断该值,稍后我们就有机会见到这种推断过程。 该项目的数据集来自 UCI 机器学习资源库。该数据集是由 Ron Kohavi 和 Barry Becker 捐赠的,他们之前在文章_“Scaling Up the Accuracy of Naive-Bayes Classifiers: A Decision-Tree Hybrid”_中发表了该数据集。你可以在此处找到 Ron Kohavi 的文章。我们在此项目中研究的数据与原始数据集稍有出入,例如删除了 'fnlwgt' 特征和缺少条目或者格式糟糕的记录。 探索数据 运行以下代码单元格以加载必要的 Python 库并加载人口普查数据。注意,该数据集中的最后一列 'income' 将是目标标签(个人年收入是否超过 50,000 美元)。人口普查数据库中的所有其他列都是关于每个人的特征。 End of explanation """ # TODO: Total number of records n_records = None # TODO: Number of records where individual's income is more than $50,000 n_greater_50k = None # TODO: Number of records where individual's income is at most $50,000 n_at_most_50k = None # TODO: Percentage of individuals whose income is more than $50,000 greater_percent = None # Print the results print("Total number of records: {}".format(n_records)) print("Individuals making more than $50,000: {}".format(n_greater_50k)) 
print("Individuals making at most $50,000: {}".format(n_at_most_50k)) print("Percentage of individuals making more than $50,000: {}%".format(greater_percent)) """ Explanation: <div> <style> .dataframe thead tr:only-child th { text-align: right; } ### 实现:数据探索 大致研究数据集后可以判断每个类别有多少人,并得出年收入超过 50,000 美元的个人所占百分比。在下面的代码单元格中,你将需要计算以下值: - 记录总条数:`'n_records'` - 年收入超过 50,000 美元的人数:`'n_greater_50k'`. - 年收入不超过 50,000 美元的人数:`'n_at_most_50k'`. - 年收入超过 50,000 美元的个人所占百分比:`'greater_percent'`. ** 提示:**你可能需要查看上述表格,了解 `'income'` 条目的格式。 End of explanation """ # Split the data into features and target label income_raw = data['income'] features_raw = data.drop('income', axis = 1) # Visualize skewed continuous features of original data vs.distribution(data) """ Explanation: 特征集探索 age:连续值。 workclass:Private、Self-emp-not-inc、Self-emp-inc、Federal-gov、Local-gov、State-gov、Without-pay、Never-worked。 education:Bachelors、Some-college、11th、HS-grad、Prof-school、Assoc-acdm、Assoc-voc、9th、7th-8th、12th、Masters、1st-4th、10th、Doctorate、5th-6th、Preschool。 education-num:连续值。 marital-status:Married-civ-spouse、Divorced、Never-married、Separated、Widowed、Married-spouse-absent、Married-AF-spouse。 occupation:Tech-support、Craft-repair、Other-service、Sales、Exec-managerial、Prof-specialty、Handlers-cleaners、Machine-op-inspct、Adm-clerical、Farming-fishing、Transport-moving、Priv-house-serv、Protective-serv、Armed-Forces。 relationship:Wife、Own-child、Husband、Not-in-family、Other-relative、Unmarried。 race:Black、White、Asian-Pac-Islander、Amer-Indian-Eskimo、Other。 sex:Female、Male。 capital-gain:连续值。 capital-loss:连续值。 hours-per-week:连续值。 native-country:United-States、Cambodia、England、Puerto-Rico、Canada、Germany、Outlying-US(Guam-USVI-etc)、India、Japan、Greece、South、China、Cuba、Iran、Honduras、Philippines、Italy、Poland、Jamaica、Vietnam、Mexico、Portugal、Ireland、France、Dominican-Republic、Laos、Ecuador、Taiwan、Haiti、Columbia、Hungary、Guatemala、Nicaragua、Scotland、Thailand、Yugoslavia、El-Salvador、Trinadad&Tobago、Peru、Hong、Holand-Netherlands。 准备数据 
在将数据作为机器学习算法的输入之前,通常必须整理数据、调整数据格式和结构,这一流程通常称之为预处理。幸运的是,该数据集没有必须处理的无效或丢失条目,但是某些特征质量不高,必须加以调整。预处理流程可以大大改善几乎所有学习算法的输出结果和预测能力。 转换偏斜连续特征 数据集可能通常至少包含一个具有以下特性的特征:值几乎都接近某个数字,但是也有极端值或比该数字大很多或小很多的值。算法会受到此类值分布的影响,如果值的范围没有正确标准化,算法的效果会大打折扣。对于人口普查数据集来说,有两个特征属于这种情况:'capital-gain' 和 'capital-loss'。 运行以下代码单元格以为这两个特征绘制直方图。注意值的范围以及分布情况。 End of explanation """ # Log-transform the skewed features skewed = ['capital-gain', 'capital-loss'] features_log_transformed = pd.DataFrame(data = features_raw) features_log_transformed[skewed] = features_raw[skewed].apply(lambda x: np.log(x + 1)) # Visualize the new log distributions vs.distribution(features_log_transformed, transformed = True) """ Explanation: 对于 'capital-gain' 和 'capital-loss' 等高度偏斜的特征分布,通常我们都会对数据应用<a href="https://en.wikipedia.org/wiki/Data_transformation_(statistics)">对数转换</a>,以便非常大和非常小的值不会对学习算法的性能带来负面影响。对数转换可以显著缩小离群值造成的值范围。但是在应用这种转换时必须谨慎:0 的对数未定义,因此我们必须让这些值加上一个比 0 大的很小的值,以便成功地应用对数算法。 运行以下代码单元格以对数据进行转换并可视化结果。同样,注意值的范围和分布情况。 End of explanation """ # Import sklearn.preprocessing.StandardScaler from sklearn.preprocessing import MinMaxScaler # Initialize a scaler, then apply it to the features scaler = MinMaxScaler() # default=(0, 1) numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week'] features_log_minmax_transform = pd.DataFrame(data = features_log_transformed) features_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical]) # Show an example of a record with scaling applied display(features_log_minmax_transform.head(n = 5)) """ Explanation: 标准化数字特征 除了需要对高度偏斜的特征进行转换之外,通常还建议对数字特征进行某种缩放。对数据进行缩放不会更改每个特征(例如上述 'capital-gain' 或 'capital-loss')的分布形状;但是,标准化可以确保在应用监督式学习器时,能够平等地对待每个特征。注意应用缩放之后,观察原始形式的数据将不再具有相同的原始含义,如下所示。 运行以下代码单元格以标准化每个数字特征。为此,我们将使用 sklearn.preprocessing.MinMaxScaler。 End of explanation """ # TODO: One-hot encode the 'features_log_minmax_transform' data using pandas.get_dummies() features_final = None # TODO: Encode the 'income_raw' data to numerical 
values income = None # Print the number of features after one-hot encoding encoded = list(features_final.columns) print("{} total features after one-hot encoding.".format(len(encoded))) # Uncomment the following line to see the encoded feature names # print encoded """ Explanation: 实现:数据预处理 在上面的探索数据表格中,我们发现每个记录都有多个特征是非数字特征。通常,学习算法都预期输入是数字,这就需要转换非数字特征(称为分类变量)。一种转换分类变量的常见方式是独热编码方法。独热编码会为每个非数字特征的每个可能类别创建一个_“虚拟”_变量。例如,假设 someFeature 有三个潜在条目:A、B 或 C。我们将此特征编码为 someFeature_A、someFeature_B 和 someFeature_C。 | | someFeature | | someFeature_A | someFeature_B | someFeature_C | | :-: | :-: | | :-: | :-: | :-: | | 0 | B | | 0 | 1 | 0 | | 1 | C | ----> one-hot encode ----> | 0 | 0 | 1 | | 2 | A | | 1 | 0 | 0 | 此外,和非数字特征一样,我们需要将非数字目标标签 'income' 转换为数字值,以便学习算法能正常运行。因为此标签只有两个可能的类别(“<=50K”和“>50K”),我们可以直接将这两个类别分别编码为 0 和 1,而不用采用独热编码。在下面的代码单元格中,你需要实现以下步骤: 使用 pandas.get_dummies() 对 'features_log_minmax_transform' 数据进行独热编码。 将目标标签 'income_raw' 转换为数字条目。 将“<=50K”的记录设为 0,并将“>50K”的记录设为 1。 End of explanation """ # Import train_test_split from sklearn.cross_validation import train_test_split # Split the 'features' and 'income' data into training and testing sets X_train, X_test, y_train, y_test = train_test_split(features_final, income, test_size = 0.2, random_state = 0) # Show the results of the split print("Training set has {} samples.".format(X_train.shape[0])) print("Testing set has {} samples.".format(X_test.shape[0])) """ Explanation: 随机打乱并拆分数据 现在,所有_分类变量_都已转换成数字特征,所有数字特征都已标准化。像往常那样,现在我们将数据(包括特征和标签)拆分为训练集和测试集。80% 的数据用于训练,20% 用于测试。 运行以下代码单元格以进行拆分。 End of explanation """ ''' TP = np.sum(income) # Counting the ones as this is the naive case. Note that 'income' is the 'income_raw' data encoded to numerical values done in the data preprocessing step. 
FP = income.count() - TP # Specific to the naive case TN = 0 # No predicted negatives in the naive case FN = 0 # No predicted negatives in the naive case ''' # TODO: Calculate accuracy, precision and recall accuracy = None recall = None precision = None # TODO: Calculate F-score using the formula above for beta = 0.5 and correct values for precision and recall. fscore = None # Print the results print("Naive Predictor: [Accuracy score: {:.4f}, F-score: {:.4f}]".format(accuracy, fscore)) """ Explanation: 评估模型效果 在此部分,我们将研究四种不同的算法,并判断哪个算法最适合对数据进行建模。其中三个算法将为监督式学习器(你可以随意选择),第四个算法称为朴素预测器。 指标和朴素预测器 CharityML 研究后发现,收入超过 50,000 美元的个人最有可能向他们的组织捐赠。因此,CharityML 非常希望准确地知道哪些人的收入超过了 50,000 美元。似乎使用准确率作为评估模型效果的指标比较合适。此外,将收入不到 50,000 美元的个人预测为收入超过 50,000 美元对 CharityML 有不利的影响,因为他们希望找到愿意捐赠的个人。因此,模型能够准确地预测收入超过 50,000 美元的个人比模型能够召回这些个人_更重要_。我们可以使用 F-β 分数作为同时考虑精确率和召回率的指标: $$ F_{\beta} = (1 + \beta^2) \cdot \frac{precision \cdot recall}{\left( \beta^2 \cdot precision \right) + recall} $$ 具体而言,当 $\beta = 0.5$ 时,我们更关注精确率。这称为 F$_{0.5}$ 分数(简称 F-分数)。 查看分类数据(收入最多达到 50,000 美元和收入超过 50,000 美元的人群)分布图之后,很明显大部分人收入不超过 50,000 美元。这样会对准确率带来很大的影响,因为我们可以不用查看数据直接说“此人收入不超过 50,000 美元”,并且通常都正确!发表这种言论比较幼稚,因为我们都没有考虑任何信息来支持这一言论。考虑数据的朴素预测始终很重要,因为这样可以建立判断模型是否效果很好的基准。但是,使用这种预测毫无意义:如果我们预测所有人的收入都不到 50,000 美元,则 CharityML 无法发现任何捐赠者。 注意:准确率、精确率、召回率总结 准确率衡量的是分类器做出正确预测的概率,即正确预测的数量与预测总数(测试数据点的数量)之比。 精确率指的是分类为垃圾短信的短信实际上是垃圾短信的概率,即真正例(分类为垃圾内容并且实际上是垃圾内容的字词)与所有正例(所有分类为垃圾内容的字词,无论是否分类正确)之比,换句话说,是以下公式的比值结果: [True Positives/(True Positives + False Positives)] 召回率(敏感性)表示实际上为垃圾短信并且被分类为垃圾短信的短信所占比例,即真正例(分类为垃圾内容并且实际上是垃圾内容的字词)与所有为垃圾内容的字词之比,换句话说,是以下公式的比值结果: [True Positives/(True Positives + False Negatives)] 对于偏态分类分布问题(我们的数据集就属于偏态分类),例如如果有 100 条短信,只有 2 条是垃圾短信,剩下的 98 条不是,则准确率本身并不是很好的指标。我们将 90 条消息分类为垃圾内容(包括 2 条垃圾内容,但是我们将其分类为非垃圾内容,因此它们属于假负例),并将 10 条消息分类为垃圾内容(所有 10 个都是假正例),依然会获得比较高的准确率分数。对于此类情形,精确率和召回率非常实用。可以通过这两个指标获得 F1 分数,即精确率和召回率分数的加权平均值(调和平均数)。该分数的范围是 0 到 1,1 表示最佳潜在 F1 分数(在计算比值时取调和平均数)。 问题 1 - 朴素预测器效果 如果我们选择一个始终预测个人收入超过 50,000 
def train_predict(learner, sample_size, X_train, y_train, X_test, y_test): 
    '''
    Train `learner` on the first `sample_size` training points and report
    timing and quality metrics.

    inputs:
       - learner: the learning algorithm to be trained and predicted on
       - sample_size: the size of samples (number) to be drawn from training set
       - X_train: features training set
       - y_train: income training set
       - X_test: features testing set
       - y_test: income testing set

    returns:
       - results: dict with wall-clock 'train_time' and 'pred_time', plus
         accuracy ('acc_train', 'acc_test') and F0.5 scores ('f_train',
         'f_test') on a fixed 300-point training subset and the full test set
    '''
    # Imported inside the function so this cell is self-contained even if the
    # metrics were not imported earlier in the notebook (resolves the TODO).
    from sklearn.metrics import fbeta_score, accuracy_score

    results = {}
    
    # Fit the learner to the first `sample_size` training examples, timing it.
    start = time() # Get start time
    learner = learner.fit(X_train[:sample_size], y_train[:sample_size])
    end = time() # Get end time
    
    # Total training time.
    results['train_time'] = end - start
        
    # Get the predictions on the test set (X_test), then on the first 300
    # training samples — a fixed subset keeps the train-score cost constant
    # regardless of sample_size.
    start = time() # Get start time
    predictions_test = learner.predict(X_test)
    predictions_train = learner.predict(X_train[:300])
    end = time() # Get end time
    
    # Total prediction time.
    results['pred_time'] = end - start
            
    # Accuracy on the first 300 training samples and on the full test set.
    results['acc_train'] = accuracy_score(y_train[:300], predictions_train)
    results['acc_test'] = accuracy_score(y_test, predictions_test)
    
    # F-score with beta = 0.5 weights precision above recall, matching the
    # project's stated preference for precision.
    results['f_train'] = fbeta_score(y_train[:300], predictions_train, beta=0.5)
    results['f_test'] = fbeta_score(y_test, predictions_test, beta=0.5)
       
    # Success
    print("{} trained on {} samples.".format(learner.__class__.__name__, sample_size))
        
    # Return the results
    return results
scikit-learn 中目前提供的一些监督式学习模型,你可以从中选择几个模型: - 高斯朴素贝叶斯 (GaussianNB) - 决策树 - 集成方法(Bagging、AdaBoost、随机森林法、Gradient Boosting) - K 近邻法 (KNeighbors) - 随机梯度下降法分类器 (SGDC) - 支持向量机 (SVM) - 逻辑回归 问题 2 - 模型应用 从上述监督式学习模型中选择三个你认为适合解决该问题的模型,并且你将用这些模型对该人口普查数据进行检验。对于所选的每个模型 描述该模型的一个实际应用领域。 该模型的优势是什么;何时效果很好? 该模型的缺点是什么;何时效果很差? 根据你对数据的了解情况,为何该模型适合解决该问题? 提示: 请按照上述格式填写答案^,针对你所选的三个模型分别回答这 4 个问题。请在答案中附上参考资料。 答案: 实现 - 创建训练和预测管道 为了准确地评估你所选的每个模型的效果,你需要创建一个训练管道和预测管道,使你能够使用各种规模的训练数据快速有效地训练模型,并且对测试数据进行预测。这一部分的实现将用在后面的部分。你需要在下面的代码单元格中实现以下步骤: - 从 sklearn.metrics 中导入 fbeta_score 和 accuracy_score。 - 将学习器与取样训练数据拟合,并记录训练时间。 - 对测试数据 X_test 进行预测,并对前 300 个训练数据点 X_train[:300] 进行预测。 - 记录总的预测时间。 - 计算训练子集和测试集的准确率分数 - 计算训练子集和测试集的 F 分数。 - 确保设置 beta 参数! End of explanation """ # TODO: Import the three supervised learning models from sklearn # TODO: Initialize the three models clf_A = None clf_B = None clf_C = None # TODO: Calculate the number of samples for 1%, 10%, and 100% of the training data # HINT: samples_100 is the entire training set i.e. 
len(y_train) # HINT: samples_10 is 10% of samples_100 (ensure to set the count of the values to be `int` and not `float`) # HINT: samples_1 is 1% of samples_100 (ensure to set the count of the values to be `int` and not `float`) samples_100 = None samples_10 = None samples_1 = None # Collect results on the learners results = {} for clf in [clf_A, clf_B, clf_C]: clf_name = clf.__class__.__name__ results[clf_name] = {} for i, samples in enumerate([samples_1, samples_10, samples_100]): results[clf_name][i] = \ train_predict(clf, samples, X_train, y_train, X_test, y_test) # Run metrics visualization for the three supervised learning models chosen vs.evaluate(results, accuracy, fscore) """ Explanation: 实现:初始模型评估 你需要在代码单元格中实现以下步骤: - 导入你在前一部分所选的三个监督式学习模型。 - 初始化这三个模型并将它们存储在 'clf_A'、'clf_B' 和 'clf_C'。 - 针对每个模型使用 'random_state'(如果提供了的话)。 - 注意:使用每个模型的默认设置——你将在后面的部分调节一个特定的模型。 - 计算 1%、10% 和 100% 的训练数据所包含的条目数。 - 分别将这些值存储在 'samples_1'、 'samples_10' 和 'samples_100' 中。 注意:根据你所选的算法,以下实现可能需要一段时间才能运行完毕! End of explanation """ # TODO: Import 'GridSearchCV', 'make_scorer', and any other necessary libraries # TODO: Initialize the classifier clf = None # TODO: Create the parameters list you wish to tune, using a dictionary if needed. 
# HINT: parameters = {'parameter_1': [value1, value2], 'parameter_2': [value1, value2]} parameters = None # TODO: Make an fbeta_score scoring object using make_scorer() scorer = None # TODO: Perform grid search on the classifier using 'scorer' as the scoring method using GridSearchCV() grid_obj = None # TODO: Fit the grid search object to the training data and find the optimal parameters using fit() grid_fit = None # Get the estimator best_clf = grid_fit.best_estimator_ # Make predictions using the unoptimized and model predictions = (clf.fit(X_train, y_train)).predict(X_test) best_predictions = best_clf.predict(X_test) # Report the before-and-afterscores print("Unoptimized model\n------") print("Accuracy score on testing data: {:.4f}".format(accuracy_score(y_test, predictions))) print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, predictions, beta = 0.5))) print("\nOptimized Model\n------") print("Final accuracy score on the testing data: {:.4f}".format(accuracy_score(y_test, best_predictions))) print("Final F-score on the testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5))) """ Explanation: 改善结果 在最后一部分,你将从三个监督式学习模型中选择用于学员数据的最佳模型。然后,你将通过调整至少一个参数,改善未调整模型的 F 分数,从而用整个训练集(X_train 和 y_train)对模型进行网格搜索优化。 问题 3 - 选择最佳模型 根据你之前的评估,用一两段文字向 CharityML 解释:在三个模型中,你认为哪个模型最适合发现收入超过 50,000 美元的个人。 提示: 查看上述单元格左下角的图表(图根据 vs.evaluate(results, accuracy, fscore)创建而成),并检查在使用所有训练集时测试集的 F 分数。哪个模型的分数最高?你的答案应该涉及以下内容: * 指标 - 如果使用了所有的训练数据,则给出测试 F 分数 * 预测/训练时间 * 算法对数据的适用情况。 答案: 问题 4 - 用通俗的语言描述模型 用一两段通俗的文字向 CharityML 解释:为何所选的最终模型能够完成任务。确保描述该模型的主要特性,例如模型的训练效果和预测效果。避免使用深奥的数学术语,例如描述方程式。 提示: 在解释模型时,如果你使用了外部资源,请注明引用的所有资源。 答案: 实现:模型调整 细调所选模型。使用网格搜索 (GridSearchCV),并且至少用 3 个不同的值对至少一个重要参数进行调整。为此,你需要使用整个训练集。你需要在下面的代码单元格中实现以下步骤: - 导入 sklearn.grid_search.GridSearchCV 和 sklearn.metrics.make_scorer。 - 初始化你所选的分类器,并将其存储在 clf 中。 - 将 random_state(如果有的话)设为之前设置的状态。 - 针对所选模型创建一个你要调整的参数字典。 - 示例:parameters = {'parameter' : [list of values]}。 - 注意:避免调整学习器的 max_features 
参数(如果有的话)! - 使用 make_scorer 创建 fbeta_score 评分对象 ($\beta = 0.5$)。 - 使用 'scorer' 对分类器 clf 进行网格搜索,并将其存储在 grid_obj 中。 - 将网格搜索对象与训练数据 (X_train, y_train) 进行拟合,并将其存储在 grid_fit 中。 注意:根据你所选的算法和参数列表,以下实现可能需要一段时间才能运行完毕! End of explanation """ # TODO: Import a supervised learning model that has 'feature_importances_' # TODO: Train the supervised model on the training set using .fit(X_train, y_train) model = None # TODO: Extract the feature importances using .feature_importances_ importances = None # Plot vs.feature_plot(importances, X_train, y_train) """ Explanation: 问题 5 - 最终模型评估 优化模型在测试数据中的准确率和 F 分数是多少? 这些分数比未优化模型的分数更高,还是更低? 优化模型与在问题 1 中得出的朴素预测器基准相比,效果如何? 注意:将结果填写在下面的表格中,然后在答案文本框内填写描述内容。 结果: | 指标 | 未优化模型 | 优化模型 | | :--------: | :--------: | :------: | | 准确率分数 | | | | F 分数 | | 示例 | 答案: 特征重要性 在对数据集(例如本项目中研究的人口普查数据)进行监督式学习时,一个重要任务是判断哪些特征的预测能力最强。通过侧重于几个关键特征与目标标签之间的关系,我们简化了对数据规律的理解流程,这么做始终都很有用。对于此项目来说,我们希望发现几个能够最为有效地预测个人收入最多 50,000 美元还是超过 50,000 美元的特征。 选择一个具有 feature_importance_ 属性的 scikit 学习分类器(例如 adaboost、随机森林),该属性是一种根据所选分类器对特征重要性进行排序的函数。在下个 python 单元格中,将此分类器与训练集进行拟合,并使用此属性确定人口普查数据集的前 5 个最重要的特征。 问题 6 - 特征相关性研究 在探索数据时发现,人口普查数据中的每条记录有 13 个特征。在这 13 个特征中,你认为哪 5 个特征对预测来说最重要,并且重要性按照什么顺序排序,原因是? 
答案: 实现 - 提取特征重要性信息 选择一个具有 feature_importance_ 属性的 scikit-learn 监督式学习算法。 该属性是一种根据所选算法对特征在进行预测时的重要性进行排序的函数。 你需要在下面的代码单元格中实现以下步骤: - 从 sklearn 中导入与之前用到的三个模型不同的监督式学习模型。 - 用整个训练集训练该监督式模型。 - 使用 '.feature_importances_' 提取特征重要性信息。 End of explanation """ # Import functionality for cloning a model from sklearn.base import clone # Reduce the feature space X_train_reduced = X_train[X_train.columns.values[(np.argsort(importances)[::-1])[:5]]] X_test_reduced = X_test[X_test.columns.values[(np.argsort(importances)[::-1])[:5]]] # Train on the "best" model found from grid search earlier clf = (clone(best_clf)).fit(X_train_reduced, y_train) # Make new predictions reduced_predictions = clf.predict(X_test_reduced) # Report scores from the final model using both versions of data print("Final Model trained on full data\n------") print("Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, best_predictions))) print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5))) print("\nFinal Model trained on reduced data\n------") print("Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, reduced_predictions))) print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, reduced_predictions, beta = 0.5))) """ Explanation: 问题 7 - 提取特征重要性信息 观察上述仅使用 5 个用于预测个人收入最多为 50,000 美元还是超过 50,000 美元的最相关特征创建的可视化图表,然后回答以下问题。 * 这 5 个特征与在问题 6 中发现的 5 个特征相比效果如何? * 如果答案接近,该可视化图表对论证你的结论有何帮助? * 如果答案不接近,为何你认为这些特征更相关? 答案: 特征选择 如果我们仅从数据的所有特征中选取部分特征,模型效果会如何?当需要训练的特征更少时,训练时间和预测时间预计会缩短很多,但是效果指标会受到影响。从上面的可视化图表可以看出,前 5 个最重要的特征比数据集中所有特征一半的重要性带来的影响要大,表明我们可以尝试缩小特征空间,并简化模型要学习的信息。以下代码单元格将使用你在之前发现的同一优化模型,并使用相同的训练集进行训练,但是仅使用前 5 个重要特征。 End of explanation """
diegocavalca/Studies
programming/Python/tensorflow/exercises/Neural_Network_Part2_Solutions.ipynb
cc0-1.0
from __future__ import print_function import numpy as np import tensorflow as tf import matplotlib.pyplot as plt %matplotlib inline from datetime import date date.today() author = "kyubyong. https://github.com/Kyubyong/tensorflow-exercises" tf.__version__ np.__version__ """ Explanation: Neural Network Part2 End of explanation """ _x = np.arange(1, 11) epsilon = 1e-12 x = tf.convert_to_tensor(_x, tf.float32) output = tf.nn.l2_normalize(x, dim=0, epsilon=epsilon) with tf.Session() as sess: _output = sess.run(output) assert np.allclose(_output, _x / np.sqrt(np.maximum(np.sum(_x**2), epsilon))) print(_output) """ Explanation: Normalization Q1. Apply l2_normalize to x. End of explanation """ _x = np.arange(1, 11) x = tf.convert_to_tensor(_x, tf.float32) counts_, sum_, sum_of_squares_, _ = tf.nn.sufficient_statistics(x, [0]) mean, variance = tf.nn.normalize_moments(counts_, sum_, sum_of_squares_, shift=None) with tf.Session() as sess: _mean, _variance = sess.run([mean, variance]) print(_mean, _variance) """ Explanation: Q2. Calculate the mean and variance of x based on the sufficient statistics. End of explanation """ tf.reset_default_graph() _x = np.arange(1, 11) x = tf.convert_to_tensor(_x, tf.float32) output = tf.nn.moments(x, [0]) with tf.Session() as sess: _mean, _variance = sess.run(output) print(_mean, _variance) """ Explanation: Q3. Calculate the mean and variance of x. End of explanation """ tf.reset_default_graph() x = tf.constant([1, 1, 2, 2, 2, 3], tf.float32) # From `x` mean, variance = tf.nn.moments(x, [0]) with tf.Session() as sess: print(sess.run([mean, variance])) # From unique elements and their counts unique_x, _, counts = tf.unique_with_counts(x) mean, variance = tf.nn.weighted_moments(unique_x, [0], counts) with tf.Session() as sess: print(sess.run([mean, variance])) """ Explanation: Q4. Calculate the mean and variance of x using unique_x and counts. 
End of explanation
"""
# Load data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)

# build graph
class Graph:
    # Two-layer fully connected MNIST classifier (784 -> 100 -> 10) with
    # batch normalization after the first matmul. `is_training` switches
    # batch_norm between its training and inference behaviour (see
    # tf.contrib.layers.batch_norm docs).
    def __init__(self, is_training=False):
        # Inputs and labels
        self.x = tf.placeholder(tf.float32, shape=[None, 784])  # flattened 28x28 images
        self.y = tf.placeholder(tf.int32, shape=[None])         # integer class labels
        # Layer 1
        w1 = tf.get_variable("w1", shape=[784, 100], initializer=tf.truncated_normal_initializer())
        output1 = tf.matmul(self.x, w1)
        # Batch norm provides the shift/scale (center=True, scale=True), so no
        # explicit bias variable is created; updates_collections=None makes the
        # moving-average updates run in place with the forward pass per the
        # contrib docs. ReLU is applied by batch_norm via activation_fn.
        output1 = tf.contrib.layers.batch_norm(output1, center=True, scale=True,
                                               is_training=is_training,
                                               updates_collections=None,
                                               activation_fn=tf.nn.relu)
        #Layer 2
        w2 = tf.get_variable("w2", shape=[100, 10], initializer=tf.truncated_normal_initializer())
        logits = tf.matmul(output1, w2)
        preds = tf.to_int32(tf.arg_max(logits, dimension=1))  # arg-max class per example
        # training
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=logits)
        self.train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
        # Fraction of examples whose predicted class equals the label.
        self.acc = tf.reduce_mean(tf.to_float(tf.equal(self.y, preds)))

# Training
tf.reset_default_graph()
g = Graph(is_training=True)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init_op)
    saver = tf.train.Saver()
    for i in range(1, 10000+1):
        batch = mnist.train.next_batch(60)
        sess.run(g.train_op, {g.x: batch[0], g.y: batch[1]})

        # Evaluation
        if i % 100 == 0:
            print("training steps=", i, "Acc. =",
                  sess.run(g.acc, {g.x: mnist.test.images, g.y: mnist.test.labels}))
    # Persist the trained variables so the inference graph below can restore them.
    save_path = saver.save(sess, './my-model')

# Inference
tf.reset_default_graph()
g2 = Graph(is_training=False)
with tf.Session() as sess:
    saver = tf.train.Saver()
    saver.restore(sess, save_path)
    hits = 0
    # Accuracy on single examples is 0.0 or 1.0, so summing over 100 examples
    # counts the number of correct predictions.
    for i in range(100):
        hits += sess.run(g2.acc, {g2.x: [mnist.test.images[i]], g2.y: [mnist.test.labels[i]]})
    print(hits)
"""
Explanation: Q5. The code below is to implement the mnist classification task. Complete it by adding batch normalization.
End of explanation """ tf.reset_default_graph() x = tf.constant([1, 1, 2, 2, 2, 3], tf.float32) output = tf.nn.l2_loss(x) with tf.Session() as sess: print(sess.run(output)) print(sess.run(tf.reduce_sum(x**2)/2)) """ Explanation: Losses Q06. Compute half the L2 norm of x without the sqrt. End of explanation """ tf.reset_default_graph() logits = tf.random_normal(shape=[2, 5, 10]) labels = tf.convert_to_tensor(np.random.randint(0, 10, size=[2, 5]), tf.int32) output = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits) with tf.Session() as sess: print(sess.run(output)) """ Explanation: Classification Q7. Compute softmax cross entropy between logits and labels. Note that the rank of them is not the same. End of explanation """ logits = tf.random_normal(shape=[2, 5, 10]) labels = tf.convert_to_tensor(np.random.randint(0, 10, size=[2, 5]), tf.int32) labels = tf.one_hot(labels, depth=10) output = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits) with tf.Session() as sess: print(sess.run(output)) """ Explanation: Q8. Compute softmax cross entropy between logits and labels. End of explanation """ tf.reset_default_graph() x = tf.constant([0, 2, 1, 3, 4], tf.int32) embedding = tf.constant([0, 0.1, 0.2, 0.3, 0.4], tf.float32) output = tf.nn.embedding_lookup(embedding, x) with tf.Session() as sess: print(sess.run(output)) """ Explanation: Embeddings Q9. Map tensor x to the embedding. End of explanation """
AhmetHamzaEmra/Deep-Learning-Specialization-Coursera
Neural Networks and Deep Learning/Building+your+Deep+Neural+Network+-+Step+by+Step+v3.ipynb
mit
import numpy as np import h5py import matplotlib.pyplot as plt from testCases_v2 import * from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward %matplotlib inline plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' %load_ext autoreload %autoreload 2 np.random.seed(1) """ Explanation: Building your Deep Neural Network: Step by Step Welcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want! In this notebook, you will implement all the functions required to build a deep neural network. In the next assignment, you will use these functions to build a deep neural network for image classification. After this assignment you will be able to: - Use non-linear units like ReLU to improve your model - Build a deeper neural network (with more than 1 hidden layer) - Implement an easy-to-use neural network class Notation: - Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer. - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters. - Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example. - Example: $x^{(i)}$ is the $i^{th}$ training example. - Lowerscript $i$ denotes the $i^{th}$ entry of a vector. - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations). Let's get started! 1 - Packages Let's first import all the packages that you will need during this assignment. - numpy is the main package for scientific computing with Python. - matplotlib is a library to plot graphs in Python. - dnn_utils provides some necessary functions for this notebook. 
- testCases provides some test cases to assess the correctness of your functions - np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed. End of explanation """ # GRADED FUNCTION: initialize_parameters def initialize_parameters(n_x, n_h, n_y): """ Argument: n_x -- size of the input layer n_h -- size of the hidden layer n_y -- size of the output layer Returns: parameters -- python dictionary containing your parameters: W1 -- weight matrix of shape (n_h, n_x) b1 -- bias vector of shape (n_h, 1) W2 -- weight matrix of shape (n_y, n_h) b2 -- bias vector of shape (n_y, 1) """ np.random.seed(1) ### START CODE HERE ### (≈ 4 lines of code) W1 = np.random.randn(n_h,n_x)*0.01 b1 = np.zeros((n_h,1)) W2 = np.random.randn(n_y,n_h)*0.01 b2 = np.zeros((n_y,1)) ### END CODE HERE ### assert(W1.shape == (n_h, n_x)) assert(b1.shape == (n_h, 1)) assert(W2.shape == (n_y, n_h)) assert(b2.shape == (n_y, 1)) parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2} return parameters parameters = initialize_parameters(2,2,1) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) """ Explanation: 2 - Outline of the Assignment To build your neural network, you will be implementing several "helper functions". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will: Initialize the parameters for a two-layer network and for an $L$-layer neural network. Implement the forward propagation module (shown in purple in the figure below). Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$). We give you the ACTIVATION function (relu/sigmoid). 
Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function. Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function. Compute the loss. Implement the backward propagation module (denoted in red in the figure below). Complete the LINEAR part of a layer's backward propagation step. We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward) Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function. Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function Finally update the parameters. <img src="images/final outline.png" style="width:800px;height:500px;"> <caption><center> Figure 1</center></caption><br> Note that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps. 3 - Initialization You will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers. 3.1 - 2-layer Neural Network Exercise: Create and initialize the parameters of the 2-layer neural network. Instructions: - The model's structure is: LINEAR -> RELU -> LINEAR -> SIGMOID. - Use random initialization for the weight matrices. Use np.random.randn(shape)*0.01 with the correct shape. - Use zero initialization for the biases. Use np.zeros(shape). 
End of explanation """ # GRADED FUNCTION: initialize_parameters_deep def initialize_parameters_deep(layer_dims): """ Arguments: layer_dims -- python array (list) containing the dimensions of each layer in our network Returns: parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL": Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1]) bl -- bias vector of shape (layer_dims[l], 1) """ np.random.seed(3) parameters = {} L = len(layer_dims) # number of layers in the network for l in range(1, L): ### START CODE HERE ### (≈ 2 lines of code) parameters['W' + str(l)] = np.random.randn(layer_dims[l],layer_dims[l-1])*0.01 parameters['b' + str(l)] = np.zeros((layer_dims[l],1)) ### END CODE HERE ### assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1])) assert(parameters['b' + str(l)].shape == (layer_dims[l], 1)) return parameters parameters = initialize_parameters_deep([5,4,3]) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) """ Explanation: Expected output: <table style="width:80%"> <tr> <td> **W1** </td> <td> [[ 0.01624345 -0.00611756] [-0.00528172 -0.01072969]] </td> </tr> <tr> <td> **b1**</td> <td>[[ 0.] [ 0.]]</td> </tr> <tr> <td>**W2**</td> <td> [[ 0.00865408 -0.02301539]]</td> </tr> <tr> <td> **b2** </td> <td> [[ 0.]] </td> </tr> </table> 3.2 - L-layer Neural Network The initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the initialize_parameters_deep, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. 
Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then: <table style="width:100%"> <tr> <td> </td> <td> **Shape of W** </td> <td> **Shape of b** </td> <td> **Activation** </td> <td> **Shape of Activation** </td> <tr> <tr> <td> **Layer 1** </td> <td> $(n^{[1]},12288)$ </td> <td> $(n^{[1]},1)$ </td> <td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td> <td> $(n^{[1]},209)$ </td> <tr> <tr> <td> **Layer 2** </td> <td> $(n^{[2]}, n^{[1]})$ </td> <td> $(n^{[2]},1)$ </td> <td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td> <td> $(n^{[2]}, 209)$ </td> <tr> <tr> <td> $\vdots$ </td> <td> $\vdots$ </td> <td> $\vdots$ </td> <td> $\vdots$</td> <td> $\vdots$ </td> <tr> <tr> <td> **Layer L-1** </td> <td> $(n^{[L-1]}, n^{[L-2]})$ </td> <td> $(n^{[L-1]}, 1)$ </td> <td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td> <td> $(n^{[L-1]}, 209)$ </td> <tr> <tr> <td> **Layer L** </td> <td> $(n^{[L]}, n^{[L-1]})$ </td> <td> $(n^{[L]}, 1)$ </td> <td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td> <td> $(n^{[L]}, 209)$ </td> <tr> </table> Remember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if: $$ W = \begin{bmatrix} j & k & l\ m & n & o \ p & q & r \end{bmatrix}\;\;\; X = \begin{bmatrix} a & b & c\ d & e & f \ g & h & i \end{bmatrix} \;\;\; b =\begin{bmatrix} s \ t \ u \end{bmatrix}\tag{2}$$ Then $WX + b$ will be: $$ WX + b = \begin{bmatrix} (ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\ (ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\ (pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u \end{bmatrix}\tag{3} $$ Exercise: Implement initialization for an L-layer Neural Network. Instructions: - The model's structure is [LINEAR -> RELU] $ \times$ (L-1) -> LINEAR -> SIGMOID. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function. - Use random initialization for the weight matrices. Use np.random.rand(shape) * 0.01. 
- Use zeros initialization for the biases. Use np.zeros(shape). - We will store $n^{[l]}$, the number of units in different layers, in a variable layer_dims. For example, the layer_dims for the "Planar Data classification model" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. Thus means W1's shape was (4,2), b1 was (4,1), W2 was (1,4) and b2 was (1,1). Now you will generalize this to $L$ layers! - Here is the implementation for $L=1$ (one layer neural network). It should inspire you to implement the general case (L-layer neural network). python if L == 1: parameters["W" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01 parameters["b" + str(L)] = np.zeros((layer_dims[1], 1)) End of explanation """ # GRADED FUNCTION: linear_forward def linear_forward(A, W, b): """ Implement the linear part of a layer's forward propagation. Arguments: A -- activations from previous layer (or input data): (size of previous layer, number of examples) W -- weights matrix: numpy array of shape (size of current layer, size of previous layer) b -- bias vector, numpy array of shape (size of the current layer, 1) Returns: Z -- the input of the activation function, also called pre-activation parameter cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently """ ### START CODE HERE ### (≈ 1 line of code) Z = np.dot(W,A)+b ### END CODE HERE ### assert(Z.shape == (W.shape[0], A.shape[1])) cache = (A, W, b) return Z, cache A, W, b = linear_forward_test_case() Z, linear_cache = linear_forward(A, W, b) print("Z = " + str(Z)) """ Explanation: Expected output: <table style="width:80%"> <tr> <td> **W1** </td> <td>[[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388] [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218] [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034] [-0.00404677 -0.0054536 -0.01546477 0.00982367 
-0.01101068]]</td> </tr> <tr> <td>**b1** </td> <td>[[ 0.] [ 0.] [ 0.] [ 0.]]</td> </tr> <tr> <td>**W2** </td> <td>[[-0.01185047 -0.0020565 0.01486148 0.00236716] [-0.01023785 -0.00712993 0.00625245 -0.00160513] [-0.00768836 -0.00230031 0.00745056 0.01976111]]</td> </tr> <tr> <td>**b2** </td> <td>[[ 0.] [ 0.] [ 0.]]</td> </tr> </table> 4 - Forward propagation module 4.1 - Linear Forward Now that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order: LINEAR LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid. [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID (whole model) The linear forward module (vectorized over all the examples) computes the following equations: $$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\tag{4}$$ where $A^{[0]} = X$. Exercise: Build the linear part of forward propagation. Reminder: The mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find np.dot() useful. If your dimensions don't match, printing W.shape may help. 
End of explanation """ # GRADED FUNCTION: linear_activation_forward def linear_activation_forward(A_prev, W, b, activation): """ Implement the forward propagation for the LINEAR->ACTIVATION layer Arguments: A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples) W -- weights matrix: numpy array of shape (size of current layer, size of previous layer) b -- bias vector, numpy array of shape (size of the current layer, 1) activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu" Returns: A -- the output of the activation function, also called the post-activation value cache -- a python dictionary containing "linear_cache" and "activation_cache"; stored for computing the backward pass efficiently """ if activation == "sigmoid": # Inputs: "A_prev, W, b". Outputs: "A, activation_cache". ### START CODE HERE ### (≈ 2 lines of code) Z, linear_cache = linear_forward(A_prev,W,b) A, activation_cache = sigmoid(Z) ### END CODE HERE ### elif activation == "relu": # Inputs: "A_prev, W, b". Outputs: "A, activation_cache". ### START CODE HERE ### (≈ 2 lines of code) Z, linear_cache = linear_forward(A_prev,W,b) A, activation_cache = relu(Z) ### END CODE HERE ### assert (A.shape == (W.shape[0], A_prev.shape[1])) cache = (linear_cache, activation_cache) return A, cache A_prev, W, b = linear_activation_forward_test_case() A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid") print("With sigmoid: A = " + str(A)) A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu") print("With ReLU: A = " + str(A)) """ Explanation: Expected output: <table style="width:35%"> <tr> <td> **Z** </td> <td> [[ 3.26295337 -1.23429987]] </td> </tr> </table> 4.2 - Linear-Activation Forward In this notebook, you will use two activation functions: Sigmoid: $\sigma(Z) = \sigma(W A + b) = \frac{1}{ 1 + e^{-(W A + b)}}$. 
We have provided you with the sigmoid function. This function returns two items: the activation value "a" and a "cache" that contains "Z" (it's what we will feed in to the corresponding backward function). To use it you could just call: python A, activation_cache = sigmoid(Z) ReLU: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the relu function. This function returns two items: the activation value "A" and a "cache" that contains "Z" (it's what we will feed in to the corresponding backward function). To use it you could just call: python A, activation_cache = relu(Z) For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step. Exercise: Implement the forward propagation of the LINEAR->ACTIVATION layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation "g" can be sigmoid() or relu(). Use linear_forward() and the correct activation function. End of explanation """ # GRADED FUNCTION: L_model_forward def L_model_forward(X, parameters): """ Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation Arguments: X -- data, numpy array of shape (input size, number of examples) parameters -- output of initialize_parameters_deep() Returns: AL -- last post-activation value caches -- list of caches containing: every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2) the cache of linear_sigmoid_forward() (there is one, indexed L-1) """ caches = [] A = X L = len(parameters) // 2 # number of layers in the neural network # Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list. 
for l in range(1, L): A_prev = A ### START CODE HERE ### (≈ 2 lines of code) A, cache = linear_activation_forward(A_prev, parameters['W'+str(l)], parameters['b'+str(l)], activation='relu') caches.append(cache) ### END CODE HERE ### # Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list. ### START CODE HERE ### (≈ 2 lines of code) AL, cache = linear_activation_forward(A, parameters['W'+str(l+1)], parameters['b'+str(l+1)], activation='sigmoid') caches.append(cache) ### END CODE HERE ### assert(AL.shape == (1,X.shape[1])) return AL, caches X, parameters = L_model_forward_test_case() AL, caches = L_model_forward(X, parameters) print("AL = " + str(AL)) print("Length of caches list = " + str(len(caches))) """ Explanation: Expected output: <table style="width:35%"> <tr> <td> **With sigmoid: A ** </td> <td > [[ 0.96890023 0.11013289]]</td> </tr> <tr> <td> **With ReLU: A ** </td> <td > [[ 3.43896131 0. ]]</td> </tr> </table> Note: In deep learning, the "[LINEAR->ACTIVATION]" computation is counted as a single layer in the neural network, not two layers. d) L-Layer Model For even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (linear_activation_forward with RELU) $L-1$ times, then follows that with one linear_activation_forward with SIGMOID. <img src="images/model_architecture_kiank.png" style="width:600px;height:300px;"> <caption><center> Figure 2 : [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID model</center></caption><br> Exercise: Implement the forward propagation of the above model. Instruction: In the code below, the variable AL will denote $A^{[L]} = \sigma(Z^{[L]}) = \sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called Yhat, i.e., this is $\hat{Y}$.) Tips: - Use the functions you had previously written - Use a for loop to replicate [LINEAR->RELU] (L-1) times - Don't forget to keep track of the caches in the "caches" list. 
To add a new value c to a list, you can use list.append(c). End of explanation """ # GRADED FUNCTION: compute_cost def compute_cost(AL, Y): """ Implement the cost function defined by equation (7). Arguments: AL -- probability vector corresponding to your label predictions, shape (1, number of examples) Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples) Returns: cost -- cross-entropy cost """ m = Y.shape[1] # Compute loss from aL and y. ### START CODE HERE ### (≈ 1 lines of code) cost = -np.sum(np.dot(Y,np.log(AL).T)+np.dot((1-Y),np.log(1-AL).T))/m ### END CODE HERE ### cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17). assert(cost.shape == ()) return cost Y, AL = compute_cost_test_case() print("cost = " + str(compute_cost(AL, Y))) """ Explanation: <table style="width:40%"> <tr> <td> **AL** </td> <td > [[ 0.17007265 0.2524272 ]]</td> </tr> <tr> <td> **Length of caches list ** </td> <td > 2</td> </tr> </table> Great! Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in "caches". Using $A^{[L]}$, you can compute the cost of your predictions. 5 - Cost function Now you will implement forward and backward propagation. You need to compute the cost, because you want to check if your model is actually learning. 
Exercise: Compute the cross-entropy cost $J$, using the following formula: $$-\frac{1}{m} \sum\limits_{i = 1}^{m} (y^{(i)}\log\left(a^{[L] (i)}\right) + (1-y^{(i)})\log\left(1- a^{L}\right)) \tag{7}$$ End of explanation """ # GRADED FUNCTION: linear_backward def linear_backward(dZ, cache): """ Implement the linear portion of backward propagation for a single layer (layer l) Arguments: dZ -- Gradient of the cost with respect to the linear output (of current layer l) cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer Returns: dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev dW -- Gradient of the cost with respect to W (current layer l), same shape as W db -- Gradient of the cost with respect to b (current layer l), same shape as b """ A_prev, W, b = cache m = A_prev.shape[1] ### START CODE HERE ### (≈ 3 lines of code) dW = np.dot(dZ,A_prev.T)/m db = np.sum(dZ,axis=1,keepdims=True)/m dA_prev = np.dot(W.T,dZ) ### END CODE HERE ### assert (dA_prev.shape == A_prev.shape) assert (dW.shape == W.shape) assert (db.shape == b.shape) return dA_prev, dW, db # Set up some test inputs dZ, linear_cache = linear_backward_test_case() dA_prev, dW, db = linear_backward(dZ, linear_cache) print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db)) """ Explanation: Expected Output: <table> <tr> <td>**cost** </td> <td> 0.41493159961539694</td> </tr> </table> 6 - Backward propagation module Just like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters. 
Reminder: <img src="images/backprop_kiank.png" style="width:650px;height:250px;"> <caption><center> Figure 3 : Forward and Backward propagation for LINEAR->RELU->LINEAR->SIGMOID <br> The purple blocks represent the forward propagation, and the red blocks represent the backward propagation. </center></caption> <!-- For those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows: $$\frac{d \mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \frac{d\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\frac{{da^{[2]}}}{{dz^{[2]}}}\frac{{dz^{[2]}}}{{da^{[1]}}}\frac{{da^{[1]}}}{{dz^{[1]}}} \tag{8} $$ In order to calculate the gradient $dW^{[1]} = \frac{\partial L}{\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted. Equivalently, in order to calculate the gradient $db^{[1]} = \frac{\partial L}{\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial b^{[1]}}$. This is why we talk about **backpropagation**. !--> Now, similar to forward propagation, you are going to build the backward propagation in three steps: - LINEAR backward - LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation - [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model) 6.1 - Linear backward For layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation). Suppose you have already calculated the derivative $dZ^{[l]} = \frac{\partial \mathcal{L} }{\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]} dA^{[l-1]})$. 
<img src="images/linearback_kiank.png" style="width:250px;height:300px;"> <caption><center> Figure 4 </center></caption> The three outputs $(dW^{[l]}, db^{[l]}, dA^{[l]})$ are computed using the input $dZ^{[l]}$.Here are the formulas you need: $$ dW^{[l]} = \frac{\partial \mathcal{L} }{\partial W^{[l]}} = \frac{1}{m} dZ^{[l]} A^{[l-1] T} \tag{8}$$ $$ db^{[l]} = \frac{\partial \mathcal{L} }{\partial b^{[l]}} = \frac{1}{m} \sum_{i = 1}^{m} dZ^{l}\tag{9}$$ $$ dA^{[l-1]} = \frac{\partial \mathcal{L} }{\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \tag{10}$$ Exercise: Use the 3 formulas above to implement linear_backward(). End of explanation """ # GRADED FUNCTION: linear_activation_backward def linear_activation_backward(dA, cache, activation): """ Implement the backward propagation for the LINEAR->ACTIVATION layer. Arguments: dA -- post-activation gradient for current layer l cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu" Returns: dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev dW -- Gradient of the cost with respect to W (current layer l), same shape as W db -- Gradient of the cost with respect to b (current layer l), same shape as b """ linear_cache, activation_cache = cache if activation == "relu": ### START CODE HERE ### (≈ 2 lines of code) dZ = relu_backward(dA, activation_cache) dA_prev, dW, db = linear_backward(dZ, linear_cache) ### END CODE HERE ### elif activation == "sigmoid": ### START CODE HERE ### (≈ 2 lines of code) dZ = sigmoid_backward(dA, activation_cache) dA_prev, dW, db = linear_backward(dZ, linear_cache) ### END CODE HERE ### return dA_prev, dW, db AL, linear_activation_cache = linear_activation_backward_test_case() dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "sigmoid") print 
("sigmoid:") print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db) + "\n") dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "relu") print ("relu:") print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db)) """ Explanation: Expected Output: <table style="width:90%"> <tr> <td> **dA_prev** </td> <td > [[ 0.51822968 -0.19517421] [-0.40506361 0.15255393] [ 2.37496825 -0.89445391]] </td> </tr> <tr> <td> **dW** </td> <td > [[-0.10076895 1.40685096 1.64992505]] </td> </tr> <tr> <td> **db** </td> <td> [[ 0.50629448]] </td> </tr> </table> 6.2 - Linear-Activation backward Next, you will create a function that merges the two helper functions: linear_backward and the backward step for the activation linear_activation_backward. To help you implement linear_activation_backward, we provided two backward functions: - sigmoid_backward: Implements the backward propagation for SIGMOID unit. You can call it as follows: python dZ = sigmoid_backward(dA, activation_cache) relu_backward: Implements the backward propagation for RELU unit. You can call it as follows: python dZ = relu_backward(dA, activation_cache) If $g(.)$ is the activation function, sigmoid_backward and relu_backward compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \tag{11}$$. Exercise: Implement the backpropagation for the LINEAR->ACTIVATION layer. 
End of explanation """ # GRADED FUNCTION: L_model_backward def L_model_backward(AL, Y, caches): """ Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group Arguments: AL -- probability vector, output of the forward propagation (L_model_forward()) Y -- true "label" vector (containing 0 if non-cat, 1 if cat) caches -- list of caches containing: every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2) the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1]) Returns: grads -- A dictionary with the gradients grads["dA" + str(l)] = ... grads["dW" + str(l)] = ... grads["db" + str(l)] = ... """ grads = {} L = len(caches) # the number of layers m = AL.shape[1] Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL # Initializing the backpropagation ### START CODE HERE ### (1 line of code) dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL ### END CODE HERE ### # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"] ### START CODE HERE ### (approx. 2 lines) current_cache = caches[L-1] grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation = "sigmoid") ### END CODE HERE ### for l in reversed(range(L-1)): # lth layer: (RELU -> LINEAR) gradients. # Inputs: "grads["dA" + str(l + 2)], caches". Outputs: "grads["dA" + str(l + 1)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)] ### START CODE HERE ### (approx. 
5 lines) current_cache = caches[l] dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 2)], current_cache, activation = "relu") #dA_prev_temp, dW_temp, db_temp = linear_activation_backward(dA_prev_temp, current_cache, "relu") grads["dA" + str(l + 1)] = dA_prev_temp grads["dW" + str(l + 1)] = dW_temp grads["db" + str(l + 1)] = db_temp ### END CODE HERE ### return grads AL, Y_assess, caches = L_model_backward_test_case() grads = L_model_backward(AL, Y_assess, caches) print ("dW1 = "+ str(grads["dW1"])) print ("db1 = "+ str(grads["db1"])) print ("dA1 = "+ str(grads["dA1"])) """ Explanation: Expected output with sigmoid: <table style="width:100%"> <tr> <td > dA_prev </td> <td >[[ 0.11017994 0.01105339] [ 0.09466817 0.00949723] [-0.05743092 -0.00576154]] </td> </tr> <tr> <td > dW </td> <td > [[ 0.10266786 0.09778551 -0.01968084]] </td> </tr> <tr> <td > db </td> <td > [[-0.05729622]] </td> </tr> </table> Expected output with relu <table style="width:100%"> <tr> <td > dA_prev </td> <td > [[ 0.44090989 0. ] [ 0.37883606 0. ] [-0.2298228 0. ]] </td> </tr> <tr> <td > dW </td> <td > [[ 0.44513824 0.37371418 -0.10478989]] </td> </tr> <tr> <td > db </td> <td > [[-0.20837892]] </td> </tr> </table> 6.3 - L-Model Backward Now you will implement the backward function for the whole network. Recall that when you implemented the L_model_forward function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the L_model_backward function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass. 
<img src="images/mn_backward.png" style="width:450px;height:300px;"> <caption><center> Figure 5 : Backward pass </center></caption> Initializing backpropagation: To backpropagate through this network, we know that the output is, $A^{[L]} = \sigma(Z^{[L]})$. Your code thus needs to compute dAL $= \frac{\partial \mathcal{L}}{\partial A^{[L]}}$. To do so, use this formula (derived using calculus which you don't need in-depth knowledge of): python dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL You can then use this post-activation gradient dAL to keep going backward. As seen in Figure 5, you can now feed in dAL into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a for loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula : $$grads["dW" + str(l)] = dW^{[l]}\tag{15} $$ For example, for $l=3$ this would store $dW^{[l]}$ in grads["dW3"]. Exercise: Implement backpropagation for the [LINEAR->RELU] $\times$ (L-1) -> LINEAR -> SIGMOID model. End of explanation """ # GRADED FUNCTION: update_parameters def update_parameters(parameters, grads, learning_rate): """ Update parameters using gradient descent Arguments: parameters -- python dictionary containing your parameters grads -- python dictionary containing your gradients, output of L_model_backward Returns: parameters -- python dictionary containing your updated parameters parameters["W" + str(l)] = ... parameters["b" + str(l)] = ... """ L = len(parameters) // 2 # number of layers in the neural network # Update rule for each parameter. Use a for loop. 
### START CODE HERE ### (≈ 3 lines of code) for l in range(L): parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - grads['dW'+str(l+1)]*learning_rate parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - grads['db'+str(l+1)]*learning_rate ### END CODE HERE ### return parameters parameters, grads = update_parameters_test_case() parameters = update_parameters(parameters, grads, 0.1) print ("W1 = "+ str(parameters["W1"])) print ("b1 = "+ str(parameters["b1"])) print ("W2 = "+ str(parameters["W2"])) print ("b2 = "+ str(parameters["b2"])) """ Explanation: Expected Output <table style="width:60%"> <tr> <td > dW1 </td> <td > [[ 0.41010002 0.07807203 0.13798444 0.10502167] [ 0. 0. 0. 0. ] [ 0.05283652 0.01005865 0.01777766 0.0135308 ]] </td> </tr> <tr> <td > db1 </td> <td > [[-0.22007063] [ 0. ] [-0.02835349]] </td> </tr> <tr> <td > dA1 </td> <td > [[ 0. 0.52257901] [ 0. -0.3269206 ] [ 0. -0.32070404] [ 0. -0.74079187]] </td> </tr> </table> 6.4 - Update Parameters In this section you will update the parameters of the model, using gradient descent: $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{16}$$ $$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{17}$$ where $\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary. Exercise: Implement update_parameters() to update your parameters using gradient descent. Instructions: Update parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$. End of explanation """
ziky5/F4500_Python_pro_fyziky
lekce_07/Moduly.ipynb
mit
from os import path path.exists("data.csv") """ Explanation: Moduly moduly aneb O importování aliasy lze importovat jen jednu třídu/funkci/proměnnou, ale moduly mohou mit i více úrovní End of explanation """ from os.path import exists """ Explanation: lze naimportovat jednotlive funkce End of explanation """ from sys import path path[-5:] """ Explanation: ale v jiném modulu má jiný význam End of explanation """ path.append("/home/jovyan/work/") path[-5:] """ Explanation: lze přidat adresář k těm, ve kterých Python hledá importované moduly End of explanation """ from math import pi import numpy as np pi is np.pi, pi==np.pi """ Explanation: ještě jeden příklad stejného jména, tady i se stejným významem - ale je to totéž? End of explanation """ ## !! nedoporucovano #from matplotlib.pyplot import * ## lepe takto import numpy as np import matplotlib.pyplot as pl """ Explanation: poslední, ale nedoporučovaná specialita syntaxe from xy import * pozor na "jmenné znečistění" (nevíme, co všechno importujeme) se značkou "*" nelze importovat uvnitř funkcí/tříd End of explanation """ print(np.__version__, np.__file__) print(np.__doc__[:1000]+"...") np? print("modul np obsahuje %i pojmenovanych funkci/proměnných/podmodulů"%len(np.__all__)) np.__all__[10:20] """ Explanation: co nás ještě může zajímat (vše - jako třeba help a.k.a. docstring - je dostupné pro programové zpracování) End of explanation """ from imp import reload reload(np) """ Explanation: ... a teď vlastní modul? Jak prosté... 
vše, co chcete (třídy pro řešení Fisherovy úlohy z pojednání o objektech), nakopírujete do souboru jako novy.py python import novy novy.Fisher("data.csv") aktualizace upravili jsme svůj kód, nebo nahráli novou verzi python from imp import reload pro fajnšmekry ještě - hlubší úrovně nutne "reloadnout" ručně (nebo dreload aka "deep reload") - nutno znovu vytvořit objekty z dotčených tříd End of explanation """ #nejak takto %matplotlib inline import fisher fg=fisher.FisherGraph("data.csv") fg.fit() #fg.graph([0,30],"X","Y","Importovano") """ Explanation: ještě pár pojmů packages celé adresáře (jednotlivé soubory jsou podmoduly) vložení (prázdného) __init__.py jak použít program jako modul? před části kódu, která se nemá spustit při importu, přidáme řádek .. if __name__=="__main__": kde se hledají: PYTHONPATH nebo sys.path (lze přidávat za běhu) hlavičky spustitelný (v linuxu?) #!/usr/bin/python nebo #! /usr/bin/env python charset (kódování) # -*- coding: utf-8 -*- docstring (co všechno v modulu najdete) cvičení zde Level 1 vytvořit modul řešící Fišerův problém (soubor fisher.py) naimportovat jej do nového notebooku Level 2 vytvořit globální proměnnou v rámci modulu fontsize s hodnotou 25 na vhodných místech nahradit konstantu proměnnou Level 3 příkazová řádka? příkazová řádka! End of explanation """
tpin3694/tpin3694.github.io
regex/match_a_unicode_character.ipynb
mit
# Load regex package import re """ Explanation: Title: Match A Unicode Character Slug: match_a_unicode_character Summary: Match A Unicode Character Date: 2016-05-01 12:00 Category: Regex Tags: Basics Authors: Chris Albon Based on: Regular Expressions Cookbook Preliminaries End of explanation """ # Create a variable containing a text string text = 'Microsoft™.' """ Explanation: Create some text End of explanation """ # Find any unicode character for a trademark re.findall(r'\u2122', text) """ Explanation: Apply regex End of explanation """
adolfoguimaraes/machinelearning
Introduction/Exercicio01_Titanic.ipynb
mit
import pandas as pd import numpy as np #Lendo a base de dados df = pd.read_csv('../datasets/titanic/train.csv') print("Tabela Original") df.head() df = df.drop(['Name', 'Ticket', 'Cabin'], axis=1) df = df.dropna() df['Gender'] = df['Sex'].map({'female': 0, 'male':1}).astype(int) df['Port'] = df['Embarked'].map({'C':1, 'S':2, 'Q':3}).astype(int) df = df.drop(['Sex', 'Embarked'], axis=1) cols = df.columns.tolist() cols = [cols[1]] + cols[0:1] + cols[2:] df = df[cols] print("Tabela depois de processada") df.head() """ Explanation: Exercício 01 - Titanic Esse exercício tem como objetivo exercitar os conceitos básicos de dataset e de aprendizagem de máquina vistos no tutorial HelloWorld e prepara-lo para o que será abordado nas próximas aulas. No exercício vamos trabalhar com um outro clássico dataset de aprendizagem de máquina: sobreviventes do titanic. Este também é um problema de classificação que visa dado um conjunto de informações dos passageiros do titanic, pretende-se classifica-los em "sobreviveu" ou "não sobreviveu" ao naufrágio. A sua tarefa principal é pesquisar sobre o KNN no scikit learn e aplica-lo a este problema. A base não vem com o ScikitLearn e vamos precisar baixar e carrega-la para trabalhar neste exemplo. Para baixar e deixar a base pronta para uso segui o tutorial a seguir: http://nbviewer.jupyter.org/github/savarin/pyconuk-introtutorial/blob/master/notebooks/Section%201-0%20-%20First%20Cut.ipynb Como a base é da competição do Kaggle, não irei disponibiliza-la aqui. Para baixar os arquivos necessários, acesse: https://www.kaggle.com/c/titanic/data (é necessário fazer um cadastro no Kaggle) Preparando a base de dados Esta etapa não é o foco do nosso exercício. Sendo assim, só irei reproduzir o passo a passo do link passado anteriormente. 
End of explanation """ dataset = { 'data': df.values[0:,2:], 'target': df.values[0:,0] } X = dataset['data'] y = dataset['target'] """ Explanation: Por fim, geramos nossa base de treino e teste no formato para o ScikitLearn End of explanation """
dietmarw/EK5312_ElectricalMachines
Chapman/Ch6-Problem_6-06.ipynb
unlicense
%pylab notebook """ Explanation: Excercises Electric Machinery Fundamentals Chapter 6 Problem 6-6 End of explanation """ R1 = 0.10 # [Ohm] R2 = 0.07 # [Ohm] Xm = 10.0 # [Ohm] X1 = 0.21 # [Ohm] X2 = 0.21 # [Ohm] Pmech = 500 # [W] Pmisc = 0 # [W] Pcore = 400 # [W] Vphi = 120 # [V] w_sync = 188.5 # [rad/s] """ Explanation: Description For the motor in Problem 6-5 What is the slip at the pullout torque? What is the pullout torque of this motor? End of explanation """ Zth = (Xm*1j * (R1 + X1*1j)) / (R1 + (X1+Xm)*1j) Zth_angle = arctan(Zth.imag/Zth.real) print('Zth = ({:.4f})Ω = {:.3f} Ω ∠{:.1f}°'.format(Zth, abs(Zth), Zth_angle/pi*180)) """ Explanation: SOLUTION The slip at pullout torque is found by calculating the Thevenin equivalent of the input circuit from the rotor back to the power supply, and then using that with the rotor circuit model. <img src="figs/FigC_6-18.jpg" width="70%"> $$Z_{TH} = \frac{jX_M(R_1+jX_1)}{R_1 + j(X_1+X_M)}$$ End of explanation """ Vth = Xm*1j / (R1 + (X1+Xm)*1j) * Vphi Vth_angle = arctan(Vth.imag/Vth.real) print('Zth = {:.1f} V ∠{:.1f}°'.format(abs(Vth), Vth_angle/pi*180)) """ Explanation: $$\vec{V_{TH}} = \frac{jX_M}{R_1 + j(X_1+X_M)} \vec{V_ \phi }$$ End of explanation """ Rth = Zth.real Xth = Zth.imag s_max = R2 / sqrt(Rth**2 + (Xth+X2)**2) print(''' s_max = {:.3f} ============='''.format(s_max)) """ Explanation: The slip at pullout torque is: $$S_\text{max} = \frac{R_2}{\sqrt{R_{TH}^2+(X_{TH}+X_2)^2}}$$ End of explanation """ tau_max = (3* abs(Vth)**2) / (2 * w_sync*(Rth + sqrt(Rth**2 + (Xth+X2)**2))) print(''' tau_max = {:.0f} Nm ================'''.format(tau_max)) """ Explanation: The pullout torque of the motor is: $$\tau_\text{max} = \frac{3V_{TH}^2}{2\omega_\text{sync}[R_{TH}+\sqrt{R_{TH}^2+(X_{TH}+X_2)^2}]}$$ End of explanation """
EFerriss/pynams
EXAMPLES_experimentation.ipynb
mit
import pynams """ Explanation: pynams functions that help when running experiments End of explanation """ from pynams import fO2 fO2 = fO2(celsius=1000, buffer_curve='NNO') print(fO2) """ Explanation: What is the log base 10 of the fO2 in bars for a given temperature and buffer? End of explanation """ from pynams import V_from_log10fO2 V_from_log10fO2(celsius=1000, log10fO2=fO2) """ Explanation: What does that fO2 correspond to in mV reported by an O2 sensor? End of explanation """ from pynams import log10fO2_from_V logfO2 = log10fO2_from_V(celsius=1000, volts=-0.8) """ Explanation: My fO2 meter is reading x mV. What fO2 does that correspond to in bars? End of explanation """
IS-ENES-Data/submission_forms
test/Templates/.ipynb_checkpoints/Create_Submission_Form-checkpoint.ipynb
apache-2.0
from dkrz_forms import form_widgets form_widgets.show_status('form-generation') """ Explanation: Create your DKRZ data ingest request form To generate a data submission form for you, please edit the cell below to include your name, email as well as the project your data belongs to Then please press "Shift" + Enter to evaluate the cell a link to the newly generated data submission form will be provided please follow this link to edit your personal form Attention: remember the form password provided to you the password is needed to retrieve your form at a later point e.g. for completion Currently the following ingest requests are supported: "CORDEX": CORDEX data ingest requests - CORDEX data to be published in ESGF, the form is aligned to the original CORDEX data ingest Excel sheet used for ingest requests at DKRZ "CMIP6": CMIP6 data ingest request form for data providers - CMIP6 data to be ingested and published in ESGF and which will be long term archived as part of the WDCC "ESGF_replication" : CMIP6 data request form for data users - request for CMIP6 data to be replicated and made available as part of the DKRZ national archive "DKRZ_CDP": data ingest request for (CMIP6 related) data collections to be included in the DKRZ CMIP data pool (CDP) e.g. 
for model evaluation purposes "test": for demo and testing purposes End of explanation """ from dkrz_forms import form_widgets form_widgets.create_form() """ Explanation: Create a data form Evaluate the cell below ("Shift-Enter") in case no input fields are visible the form will be created as soon as you press "Enter" in the last input field below to fill the form follow the url shown as a result of the form generation in case you want to retrieve and complete the form later on, please follow the steps outlined below in "Retrieve your DKRZ data form" End of explanation """ from dkrz_forms import form_widgets form_widgets.show_status('form-retrieval') """ Explanation: Retrieve your DKRZ data form Via this form you can retrieve previously generated data forms and make them accessible via the Web again for completion. Additionally you can get information on the data ingest process status related to your form based request. End of explanation """ MY_LAST_NAME = "...." # e.gl MY_LAST_NAME = "schulz" #------------------------------------------------- from dkrz_forms import form_handler, form_widgets form_info = form_widgets.check_and_retrieve(MY_LAST_NAME) """ Explanation: Please provide your last name please set your last name in the cell below and evaluate the cell (press "Shift-Return") - when entering the key identifying your form the form is retrieved and accessible via the url presented. (The identifying key was provided to you as part of the form generation step) End of explanation """
sdpython/ensae_teaching_cs
_doc/notebooks/exams/td_note_2015.ipynb
mit
from jyquickhelper import add_notebook_menu add_notebook_menu() """ Explanation: 1A.e - TD noté, 5 décembre 2014 Parcours de chemins dans un graphe acyclique (arbre). End of explanation """ def adjacence(N): # on crée uen matrice vide mat = [ [ 0 for j in range(N) ] for i in range(N) ] for i in range(0,N-1): mat[i][i+1] = 1 return mat mat = adjacence(7) mat """ Explanation: Après chaque question, on vérifie sur un petit exemple que cela fonctionne comme attendu. Exercice 1 Ce premier exercice aborde la problème d'un parcours de graphe non récursif. Q1 End of explanation """ import random def ajoute_points(mat,nb=5): ajout = { } while len(ajout) < 5 : i,j = random.randint(0,len(mat)-1),random.randint(0,len(mat)-1) if i < j and (i,j) not in ajout: mat[i][j] = 1 ajout[i,j] = 1 ajoute_points(mat) mat """ Explanation: Q2 Il faut ajouter 5 arcs au hasard en évitant d'ajouter deux fois le même. End of explanation """ def successeurs(adj,i): ligne = adj[i] # dans l'expression suivante, # s est la valeur de la matrice (0 ou 1) # i l'indice return [ i for i,s in enumerate(ligne) if s == 1 ] successeurs(mat, 1) """ Explanation: Q3 End of explanation """ def successeurs_dico(adj): return { i:successeurs(adj, i) for i in range(len(adj)) } dico = successeurs_dico(mat) dico """ Explanation: Q4 End of explanation """ def suites_chemin(chemin, dico): dernier = chemin[-1] res = [ ] for s in dico[dernier]: res.append ( chemin + [ s ] ) return res suites_chemin( [ 0, 1 ], dico) """ Explanation: Q5 End of explanation """ def parcours(adj): dico = successeurs_dico(adj) chemins = [ [ 0 ]] resultat = [ ] while len(chemins) > 0 : chemins2 = [] for chemin in chemins : res = suites_chemin(chemin, dico) if len(res) == 0: # chemin est un chemin qui ne peut être continué resultat.append ( chemin ) else: chemins2.extend ( res ) chemins = chemins2 return resultat parcours(mat) """ Explanation: Q6 End of explanation """ def adjacence8(N): # on crée uen matrice vide mat = [ [ 0 for j in range(N) ] 
for i in range(N) ] for i in range(0,N-1): for j in range(i+1,N): mat[i][j] = 1 return mat adj = adjacence8(7) adj che = parcours(adj) print("nombre",len(che)) che """ Explanation: Q7 La différence entre un parcours en profondeur et un parcours en largeur tient au fait qu'on préfère d'abord explorer le successeur direct, puis le successeur direct plutôt que les voisins du successeurs directe. Dans le premier cas, on aboutit très vite à un chemin terminé. Dans le second cas, on obtient les chemins plutôt vers la fin de l'algorithme. Dans la version proposée par l'algorithme, c'est un parcours en largeur qui est implémenté. Q8 La matrice en question est la suivante (pour $N=7$) : End of explanation """ for i in range(5,11): adj = adjacence8(i) che = parcours(adj) print(i, "-->",len(che)) """ Explanation: On fait une petite boucle pour intuiter le résultat : End of explanation """ l = [ -1, 4, 6, 4, 1, 9, 5 ] l.sort() l[:3] """ Explanation: Cela ressemble beaucoup à des puissances de deux. Cela suggère un raisonnement par récurrence. Chaque noeud $i$ est connecté à tous les suivantes $i+1$, $i+2$... On remarque que tous les chemins se termine par le dernier noeud $n$. Lorsqu'on ajoute le noeud $n+1$ au graphe, il sera le successeur de tous les autres. Pour un chemin donné, on peut soit l'ajouter à la fin, soit remplacer le dernier noeud $n$ par $n-1$. C'est ainsi qu'on multiplie par deux le nombre de chemins. S'il y a $n$ noeuds, on obtient $2^{n-2}$. Exercice 2 On suppose qu'on dispose d'un tableau de nombres non trié. Ecrire une fonction qui retourne les trois éléments minimaux. La première option consiste à utiliser la fonction sort. Celle-ci a un coût de $O(n \ln n)$ le programme est très simple. 
End of explanation """ def garde_3_element(tab): meilleur = [ ] for t in tab: if len(meilleur) < 3 : meilleur.append(t) meilleur.sort() elif t < meilleur[2] : meilleur[2] = t meilleur.sort() return meilleur garde_3_element(l) """ Explanation: Le problème qu'on cherche à résoudre est plus simple puisqu'il s'agit de ne garder que les trois premiers éléments. On n'a pas besoin de trier la fin de la liste. L'idée consiste à parcourir le tableau et à ne conserver que les trois premiers éléments. Si un élément est plus grand que le troisième élément, on ne s'en occupe pas. End of explanation """ def word2dict(mot): return { i: mot[:i] for i in range(len(mot)+1) } word2dict("mot"), word2dict("python") """ Explanation: Même si on utilise un tri, le coût est en en $O(n)$ car le tri opère sur au plus trois éléments. Exercice 3 Q1 End of explanation """ def two_words2dict(d1,d2): return { (i,j): (d1[i],d2[j]) for i in d1 for j in d2 } mot1 = "python" mot2 = "piton" d1 = word2dict(mot1) d2 = word2dict(mot2) vertices = two_words2dict(d1,d2) vertices """ Explanation: Q2 End of explanation """ len(vertices),(len(mot1)+1)*(len(mot2)+1) """ Explanation: Q3 Il y a autant d'éléments que $(len(mot1) +1)*(len(mot2)+1)$ puisqu'on fait une double boucle sur toutes les positions + 1 pour 0. Donc $(p+1)(q+1)$ si $p$ et $q$ sont les tailles des deux mots. End of explanation """ def add_edge_hv(vertices): edges = { } for edge1 in vertices: i1,j1 = edge1 for edge2 in vertices: i2,j2 = edge2 if (i2-i1==1 and j1==j2) or (j2-j1==1 and i1==i2) : edges[ edge1,edge2 ] = 1 return edges edges = add_edge_hv(vertices) edges """ Explanation: Q4 End of explanation """ len(edges), 2*len(mot1)*len(mot2)+len(mot1)+len(mot2) """ Explanation: Q5 Pour chaque noeud, on ajoute deux arcs excepté les noeuds qui correspond à la fin des mots. Donc $2(p+1)(q+1)-(p+1)-(q+1)=2pq+p+q$. 
End of explanation """ def cout(m1,m2): c1 = m1[-1] c2 = m2[-1] if c1==c2 : return 0 else : return 1 def ajoute_diagonale(edges, vertices): # edges = { } # on n'ajoute surtout pas cette ligne, sinon c'est comme si on effaçait tout ce que contient # edges for edge1 in vertices: i1,j1 = edge1 for edge2 in vertices: i2,j2 = edge2 if i2-i1==1 and j2-j1==1 : edges[ edge1,edge2 ] = cout (vertices [ edge2 ][0], vertices [ edge2 ][1] ) ajoute_diagonale(edges, vertices) edges """ Explanation: Q6 On s'inspire de la fonction précédente. Il serait plus efficace de les fusionner. End of explanation """ def loop_on_edges(distance, edges): for edge,cout in edges.items() : v1,v2 = edge if v1 in distance and (v2 not in distance or distance[v2] > distance[v1] + cout) : distance[v2] = distance[v1] + cout """ Explanation: Q7 L'algorithme du plus court chemin. End of explanation """ def loop_on_edges(distance, edges): misejour = 0 for edge,cout in edges.items() : v1,v2 = edge if v1 in distance and (v2 not in distance or distance[v2] > distance[v1] + cout) : distance[v2] = distance[v1] + cout misejour += 1 return misejour """ Explanation: Q8 La question était sans doute un peu mal posé car il est beaucoup plus facile pour la fonction loop_on_edges de savoir si le dictionnaire distance est modifié ou non. On la modifie pour qu'elle retourne le nombre de mises à jour. End of explanation """ def plus_court_chemin(edges): distance = { (0,0): 0 } m = 1 while m > 0: m = loop_on_edges(distance, edges) return distance resultat = plus_court_chemin(edges) resultat """ Explanation: Puis l'algorithme final : End of explanation """ print(mot1,mot2) resultat [ len(mot1), len(mot2) ] """ Explanation: Q9 Comme on a tout fait avec ces deux mots, il suffit de prendre la bonne valeur dans le tableau distance : End of explanation """ l = [1, 8, 5, 7, 3, 6, 9] l2 = [ (i%2, i) for i in l] l2.sort() res = [ b for a,b in l2 ] res """ Explanation: Exercice 4 On a un tableau d'entiers l = [1, 8, 5, 7, 3, 6, 9]. 
On veut placer les entiers pairs en premiers et les entiers impairs en derniers : 8, 6, 1, 5, 7, 3, 9. Ecrire une fonction qui fait cela. Le coût d'un tri est de $O(n \ln n)$. On construit d'abord le couple (parité, élément) pour chaque élément puis on trie de table. C'est la solution la plus simple. End of explanation """ def trie_parite(l): i = 0 j = len(l)-1 while i < j : while i < j and l[i]%2 == 0 : i += 1 while i < j and l[j]%2 == 1 : j -= 1 if i < j: ech = l[i] l[i] = l[j] l[j] = ech i += 1 j -= 1 l = l.copy() trie_parite(l) l """ Explanation: Dans cas précis, on ne souhaite pas trier sur les nombres mais sur leur parité. En quelque sorte, on ne s'intéresse pas de savoir dans quel ordre deux nombres pairs seront triés. Cela réduit le nombre d'opérations à effectuer. Une idée consiste à parcourir le tableau par les deux bouts et à échanger deux nombres dès que leur parité sont mal classées. End of explanation """
james-prior/cohpy
20150327-dojo-join.ipynb
mit
''.join(['hello', 'gnew', 'world']) ' '.join(['hello', 'gnew', 'world']) ','.join(['hello', 'gnew', 'world']) ', '.join(['hello', 'gnew', 'world']) ' and '.join(['hello', 'gnew', 'world']) """ Explanation: The join method for strings is confusing for many beginners, so here are some examples. The first one, that applies the join method to an empty string, is the simplest and yet perhaps the most confusing for some beginners. End of explanation """ ' and '.join(['hello', 'gnew']) ' and '.join(['hello']) ' and '.join([]) """ Explanation: It "does nothing" gracefully. End of explanation """ def foo(): yield 'hello' yield 'gnew' yield 'world' ' and '.join(foo()) """ Explanation: It accepts any kind of iterable that gives strings. The following example uses a trivial generator function. End of explanation """
lmoresi/UoM-VIEPS-Intro-to-Python
Notebooks/SolveMathProblems/3 - AdvancedFiniteDifferences.ipynb
mit
import numpy as np import matplotlib.pyplot as plt %matplotlib inline """ Explanation: Advanced finite difference This notebook assumes that you have completed the finite difference operations notebook. End of explanation """ voxel = np.load('voxel_data.npz')['data'] voxel.shape fig = plt.figure(1, figsize=(20, 5)) ax1 = fig.add_subplot(111) im1 = ax1.imshow(voxel, origin='lower', cmap='Paired', vmin=1, vmax=28) fig.colorbar(im1, ax=ax1) plt.show() """ Explanation: 1 Spatially variable diffusivity For most purposes we want $\kappa$ to be spatially variable. The heat equation can be expressed as, $$\frac{\partial}{\partial x} \left( \kappa \frac{\partial T}{\partial x} \right) + \frac{\partial}{\partial y} \left( \kappa \frac{\partial T}{\partial y} \right) = -H $$ The corresponding finite difference approximations in the $x$ and $y$ coordinates are, $$ \frac{\partial}{\partial x} \left( \kappa \frac{\partial T}{\partial x} \right) = \frac{1}{\Delta x} \left( \frac{ \kappa_{i+1/2,j} (T_{i+1,j}-T_{i,j}) }{\Delta x} - \frac{ \kappa_{i-1/2,j} (T_{i,j}-T_{i-1,j}) }{\Delta x} \right) $$ $$ \frac{\partial}{\partial y} \left( \kappa \frac{\partial T}{\partial y} \right) = \frac{1}{\Delta y} \left( \frac{ \kappa_{i,j+1/2} (T_{i,j+1}-T_{i,j}) }{\Delta y} - \frac{ \kappa_{i,j-1/2} (T_{i,j}-T_{i,j-1}) }{\Delta y} \right) $$ where \( \kappa_{i+1/2,j} \) can be averaged by, $$ \kappa_{i+1/2,j} = \frac{\kappa_{i+1,j} + \kappa_{i,j}}{2} $$ EXERCISE 1 Construct your own matrices using the finite difference approximation for non-constant diffusivity. 2. Modelling complex geometries Now for the fun part. In this section we take a 2D geological cross section (from some random part of the world) and model the temperature variation across different rock types. To do this requires assignment of thermal properties, \( \kappa, H \), to each layer in the cross section. 
Before you attempt this, here is a checklist of what you should have accomplished: 2D steady-state heat solver for non-constant \( \kappa \). Neumann and Dirichlet boundary conditions. Use of sparse matrices (optional, but recommended). Our model domain is 800 x 260 nodes and contains integers from 1 to 28 that correspond to a unique rock layer. Your task will be to create a conductivity and heat production field, that vary with lithology, and pass them to your solver. EXERCISE 2 Assign thermal properties to the voxel data and solve steady-state diffusion Create 3 plots: temperature, conductivity, and heat production. (You might want to give them distinct colourmaps to help distinguish them!) End of explanation """
zomansud/coursera
ml-classification/week-5/module-8-boosting-assignment-2-blank.ipynb
mit
import graphlab import matplotlib.pyplot as plt %matplotlib inline """ Explanation: Boosting a decision stump The goal of this notebook is to implement your own boosting module. Brace yourselves! This is going to be a fun and challenging assignment. Use SFrames to do some feature engineering. Modify the decision trees to incorporate weights. Implement Adaboost ensembling. Use your implementation of Adaboost to train a boosted decision stump ensemble. Evaluate the effect of boosting (adding more decision stumps) on performance of the model. Explore the robustness of Adaboost to overfitting. Let's get started! Fire up GraphLab Create Make sure you have the latest version of GraphLab Create (1.8.3 or newer). Upgrade by pip install graphlab-create --upgrade See this page for detailed instructions on upgrading. End of explanation """ loans = graphlab.SFrame('lending-club-data.gl/') """ Explanation: Getting the data ready We will be using the same LendingClub dataset as in the previous assignment. End of explanation """ features = ['grade', # grade of the loan 'term', # the term of the loan 'home_ownership', # home ownership status: own, mortgage or rent 'emp_length', # number of years of employment ] loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1) loans.remove_column('bad_loans') target = 'safe_loans' loans = loans[features + [target]] """ Explanation: Extracting the target and the feature columns We will now repeat some of the feature processing steps that we saw in the previous assignment: First, we re-assign the target to have +1 as a safe (good) loan, and -1 as a risky (bad) loan. Next, we select four categorical features: 1. grade of the loan 2. the length of the loan term 3. the home ownership status: own, mortgage, rent 4. number of years of employment. End of explanation """ safe_loans_raw = loans[loans[target] == 1] risky_loans_raw = loans[loans[target] == -1] # Undersample the safe loans. 
percentage = len(risky_loans_raw)/float(len(safe_loans_raw)) risky_loans = risky_loans_raw safe_loans = safe_loans_raw.sample(percentage, seed=1) loans_data = risky_loans_raw.append(safe_loans) print "Percentage of safe loans :", len(safe_loans) / float(len(loans_data)) print "Percentage of risky loans :", len(risky_loans) / float(len(loans_data)) print "Total number of loans in our new dataset :", len(loans_data) """ Explanation: Subsample dataset to make sure classes are balanced Just as we did in the previous assignment, we will undersample the larger class (safe loans) in order to balance out our dataset. This means we are throwing away many data points. We use seed=1 so everyone gets the same results. End of explanation """ loans_data = risky_loans.append(safe_loans) for feature in features: loans_data_one_hot_encoded = loans_data[feature].apply(lambda x: {x: 1}) loans_data_unpacked = loans_data_one_hot_encoded.unpack(column_name_prefix=feature) # Change None's to 0's for column in loans_data_unpacked.column_names(): loans_data_unpacked[column] = loans_data_unpacked[column].fillna(0) loans_data.remove_column(feature) loans_data.add_columns(loans_data_unpacked) """ Explanation: Note: There are many approaches for dealing with imbalanced data, including some where we modify the learning algorithm. These approaches are beyond the scope of this course, but some of them are reviewed in this paper. For this assignment, we use the simplest possible approach, where we subsample the overly represented class to get a more balanced dataset. In general, and especially when the data is highly imbalanced, we recommend using more advanced methods. Transform categorical data into binary features In this assignment, we will work with binary decision trees. Since all of our features are currently categorical features, we want to turn them into binary features using 1-hot encoding. 
We can do so with the following code block (see the first assignments for more details): End of explanation """ features = loans_data.column_names() features.remove('safe_loans') # Remove the response variable features """ Explanation: Let's see what the feature columns look like now: End of explanation """ train_data, test_data = loans_data.random_split(0.8, seed=1) """ Explanation: Train-test split We split the data into training and test sets with 80% of the data in the training set and 20% of the data in the test set. We use seed=1 so that everyone gets the same result. End of explanation """ def intermediate_node_weighted_mistakes(labels_in_node, data_weights): # Sum the weights of all entries with label +1 total_weight_positive = sum(data_weights[labels_in_node == +1]) # Weight of mistakes for predicting all -1's is equal to the sum above ### YOUR CODE HERE weighted_mistakes_all_negative = sum(data_weights[labels_in_node != -1]) # Sum the weights of all entries with label -1 ### YOUR CODE HERE total_weight_negative = sum(data_weights[labels_in_node == -1]) # Weight of mistakes for predicting all +1's is equal to the sum above ### YOUR CODE HERE weighted_mistakes_all_positive = sum(data_weights[labels_in_node != +1]) # Return the tuple (weight, class_label) representing the lower of the two weights # class_label should be an integer of value +1 or -1. # If the two weights are identical, return (weighted_mistakes_all_positive,+1) ### YOUR CODE HERE if weighted_mistakes_all_negative < weighted_mistakes_all_positive: return (weighted_mistakes_all_negative, -1) else: return (weighted_mistakes_all_positive, +1) """ Explanation: Weighted decision trees Let's modify our decision tree code from Module 5 to support weighting of individual data points. Weighted error definition Consider a model with $N$ data points with: * Predictions $\hat{y}_1 ... \hat{y}_n$ * Target $y_1 ... y_n$ * Data point weights $\alpha_1 ... \alpha_n$. 
Then the weighted error is defined by:
$$ \mathrm{E}(\mathbf{\alpha}, \mathbf{\hat{y}}) = \frac{\sum_{i=1}^{n} \alpha_i \times 1[y_i \neq \hat{y_i}]}{\sum_{i=1}^{n} \alpha_i} $$
where $1[y_i \neq \hat{y_i}]$ is an indicator function that is set to $1$ if $y_i \neq \hat{y_i}$.
Write a function to compute weight of mistakes
Write a function that calculates the weight of mistakes for making the "weighted-majority" predictions for a dataset. The function accepts two inputs:
* labels_in_node: Targets $y_1 ... y_n$
* data_weights: Data point weights $\alpha_1 ... \alpha_n$
We are interested in computing the (total) weight of mistakes, i.e.
$$ \mathrm{WM}(\mathbf{\alpha}, \mathbf{\hat{y}}) = \sum_{i=1}^{n} \alpha_i \times 1[y_i \neq \hat{y_i}]. $$
This quantity is analogous to the number of mistakes, except that each mistake now carries different weight. It is related to the weighted error in the following way:
$$ \mathrm{E}(\mathbf{\alpha}, \mathbf{\hat{y}}) = \frac{\mathrm{WM}(\mathbf{\alpha}, \mathbf{\hat{y}})}{\sum_{i=1}^{n} \alpha_i} $$
The function intermediate_node_weighted_mistakes should first compute two weights:
* $\mathrm{WM}_{-1}$: weight of mistakes when all predictions are $\hat{y}_i = -1$, i.e. $\mathrm{WM}(\mathbf{\alpha}, \mathbf{-1})$
* $\mathrm{WM}_{+1}$: weight of mistakes when all predictions are $\hat{y}_i = +1$, i.e. $\mathrm{WM}(\mathbf{\alpha}, \mathbf{+1})$
where $\mathbf{-1}$ and $\mathbf{+1}$ are vectors where all values are -1 and +1 respectively.
After computing $\mathrm{WM}_{-1}$ and $\mathrm{WM}_{+1}$, the function intermediate_node_weighted_mistakes should return the lower of the two weights of mistakes, along with the class associated with that weight. If the two weights are identical, it should return $(\mathrm{WM}_{+1}, +1)$. We have provided a skeleton for you with YOUR CODE HERE to be filled in several places.
End of explanation """ example_labels = graphlab.SArray([-1, -1, 1, 1, 1]) example_data_weights = graphlab.SArray([1., 2., .5, 1., 1.]) if intermediate_node_weighted_mistakes(example_labels, example_data_weights) == (2.5, -1): print 'Test passed!' else: print 'Test failed... try again!' """ Explanation: Checkpoint: Test your intermediate_node_weighted_mistakes function, run the following cell: End of explanation """ def best_splitting_feature(data, features, target, data_weights): # These variables will keep track of the best feature and the corresponding error best_feature = None best_error = float('+inf') num_points = float(len(data)) # Loop through each feature to consider splitting on that feature for feature in features: # The left split will have all data points where the feature value is 0 # The right split will have all data points where the feature value is 1 left_split = data[data[feature] == 0] right_split = data[data[feature] == 1] # Apply the same filtering to data_weights to create left_data_weights, right_data_weights ## YOUR CODE HERE left_data_weights = data_weights[data[feature] == 0] right_data_weights = data_weights[data[feature] == 1] # DIFFERENT HERE # Calculate the weight of mistakes for left and right sides ## YOUR CODE HERE left_weighted_mistakes, left_class = intermediate_node_weighted_mistakes( left_split['safe_loans'], left_data_weights ) right_weighted_mistakes, right_class = intermediate_node_weighted_mistakes( right_split['safe_loans'], right_data_weights ) # DIFFERENT HERE # Compute weighted error by computing # ( [weight of mistakes (left)] + [weight of mistakes (right)] ) / [total weight of all data points] ## YOUR CODE HERE error = (left_weighted_mistakes + right_weighted_mistakes) / sum(data_weights) # If this is the best error we have found so far, store the feature and the error if error < best_error: best_feature = feature best_error = error # Return the best feature we found return best_feature """ Explanation: Recall that 
the classification error is defined as follows: $$ \mbox{classification error} = \frac{\mbox{# mistakes}}{\mbox{# all data points}} $$ Quiz Question: If we set the weights $\mathbf{\alpha} = 1$ for all data points, how is the weight of mistakes $\mbox{WM}(\mathbf{\alpha}, \mathbf{\hat{y}})$ related to the classification error? Function to pick best feature to split on We continue modifying our decision tree code from the earlier assignment to incorporate weighting of individual data points. The next step is to pick the best feature to split on. The best_splitting_feature function is similar to the one from the earlier assignment with two minor modifications: 1. The function best_splitting_feature should now accept an extra parameter data_weights to take account of weights of data points. 2. Instead of computing the number of mistakes in the left and right side of the split, we compute the weight of mistakes for both sides, add up the two weights, and divide it by the total weight of the data. Complete the following function. Comments starting with DIFFERENT HERE mark the sections where the weighted version differs from the original implementation. End of explanation """ example_data_weights = graphlab.SArray(len(train_data)* [1.5]) if best_splitting_feature(train_data, features, target, example_data_weights) == 'term. 36 months': print 'Test passed!' else: print 'Test failed... try again!' """ Explanation: Checkpoint: Now, we have another checkpoint to make sure you are on the right track. End of explanation """ def create_leaf(target_values, data_weights): # Create a leaf node leaf = {'splitting_feature' : None, 'is_leaf': True} # Computed weight of mistakes. weighted_error, best_class = intermediate_node_weighted_mistakes(target_values, data_weights) # Store the predicted class (1 or -1) in leaf['prediction'] leaf['prediction'] = best_class return leaf """ Explanation: Note. 
If you get an exception in the line of "the logical filter has different size than the array", try upgrading your GraphLab Create installation to 1.8.3 or newer.
Very Optional. Relationship between weighted error and weight of mistakes
By definition, the weighted error is the weight of mistakes divided by the weight of all data points, so
$$ \mathrm{E}(\mathbf{\alpha}, \mathbf{\hat{y}}) = \frac{\sum_{i=1}^{n} \alpha_i \times 1[y_i \neq \hat{y_i}]}{\sum_{i=1}^{n} \alpha_i} = \frac{\mathrm{WM}(\mathbf{\alpha}, \mathbf{\hat{y}})}{\sum_{i=1}^{n} \alpha_i}. $$
In the code above, we obtain $\mathrm{E}(\mathbf{\alpha}, \mathbf{\hat{y}})$ from the two weights of mistakes from both sides, $\mathrm{WM}(\mathbf{\alpha}_{\mathrm{left}}, \mathbf{\hat{y}}_{\mathrm{left}})$ and $\mathrm{WM}(\mathbf{\alpha}_{\mathrm{right}}, \mathbf{\hat{y}}_{\mathrm{right}})$. First, notice that the overall weight of mistakes $\mathrm{WM}(\mathbf{\alpha}, \mathbf{\hat{y}})$ can be broken into two weights of mistakes over either side of the split:
$$ \mathrm{WM}(\mathbf{\alpha}, \mathbf{\hat{y}}) = \sum_{i=1}^{n} \alpha_i \times 1[y_i \neq \hat{y_i}] = \sum_{\mathrm{left}} \alpha_i \times 1[y_i \neq \hat{y_i}] + \sum_{\mathrm{right}} \alpha_i \times 1[y_i \neq \hat{y_i}] \\ = \mathrm{WM}(\mathbf{\alpha}_{\mathrm{left}}, \mathbf{\hat{y}}_{\mathrm{left}}) + \mathrm{WM}(\mathbf{\alpha}_{\mathrm{right}}, \mathbf{\hat{y}}_{\mathrm{right}}) $$
We then divide through by the total weight of all data points to obtain $\mathrm{E}(\mathbf{\alpha}, \mathbf{\hat{y}})$:
$$ \mathrm{E}(\mathbf{\alpha}, \mathbf{\hat{y}}) = \frac{\mathrm{WM}(\mathbf{\alpha}_{\mathrm{left}}, \mathbf{\hat{y}}_{\mathrm{left}}) + \mathrm{WM}(\mathbf{\alpha}_{\mathrm{right}}, \mathbf{\hat{y}}_{\mathrm{right}})}{\sum_{i=1}^{n} \alpha_i} $$
Building the tree
With the above functions implemented correctly, we are now ready to build our decision tree.
Recall from the previous assignments that each node in the decision tree is represented as a dictionary which contains the following keys: { 'is_leaf' : True/False. 'prediction' : Prediction at the leaf node. 'left' : (dictionary corresponding to the left tree). 'right' : (dictionary corresponding to the right tree). 'features_remaining' : List of features that are posible splits. } Let us start with a function that creates a leaf node given a set of target values: End of explanation """ def weighted_decision_tree_create(data, features, target, data_weights, current_depth = 1, max_depth = 10): remaining_features = features[:] # Make a copy of the features. target_values = data[target] print "--------------------------------------------------------------------" print "Subtree, depth = %s (%s data points)." % (current_depth, len(target_values)) # Stopping condition 1. Error is 0. if intermediate_node_weighted_mistakes(target_values, data_weights)[0] <= 1e-15: print "Stopping condition 1 reached." return create_leaf(target_values, data_weights) # Stopping condition 2. No more features. if remaining_features == []: print "Stopping condition 2 reached." return create_leaf(target_values, data_weights) # Additional stopping condition (limit tree depth) if current_depth > max_depth: print "Reached maximum depth. Stopping for now." return create_leaf(target_values, data_weights) splitting_feature = best_splitting_feature(data, features, target, data_weights) remaining_features.remove(splitting_feature) left_split = data[data[splitting_feature] == 0] right_split = data[data[splitting_feature] == 1] left_data_weights = data_weights[data[splitting_feature] == 0] right_data_weights = data_weights[data[splitting_feature] == 1] print "Split on feature %s. (%s, %s)" % (\ splitting_feature, len(left_split), len(right_split)) # Create a leaf node if the split is "perfect" if len(left_split) == len(data): print "Creating leaf node." 
return create_leaf(left_split[target], data_weights) if len(right_split) == len(data): print "Creating leaf node." return create_leaf(right_split[target], data_weights) # Repeat (recurse) on left and right subtrees left_tree = weighted_decision_tree_create( left_split, remaining_features, target, left_data_weights, current_depth + 1, max_depth) right_tree = weighted_decision_tree_create( right_split, remaining_features, target, right_data_weights, current_depth + 1, max_depth) return {'is_leaf' : False, 'prediction' : None, 'splitting_feature': splitting_feature, 'left' : left_tree, 'right' : right_tree} """ Explanation: We provide a function that learns a weighted decision tree recursively and implements 3 stopping conditions: 1. All data points in a node are from the same class. 2. No more features to split on. 3. Stop growing the tree when the tree depth reaches max_depth. End of explanation """ def count_nodes(tree): if tree['is_leaf']: return 1 return 1 + count_nodes(tree['left']) + count_nodes(tree['right']) """ Explanation: Here is a recursive function to count the nodes in your tree: End of explanation """ example_data_weights = graphlab.SArray([1.0 for i in range(len(train_data))]) small_data_decision_tree = weighted_decision_tree_create(train_data, features, target, example_data_weights, max_depth=2) if count_nodes(small_data_decision_tree) == 7: print 'Test passed!' else: print 'Test failed... try again!' print 'Number of nodes found:', count_nodes(small_data_decision_tree) print 'Number of nodes that should be there: 7' """ Explanation: Run the following test code to check your implementation. Make sure you get 'Test passed' before proceeding. End of explanation """ small_data_decision_tree """ Explanation: Let us take a quick look at what the trained tree is like. 
You should get something that looks like the following {'is_leaf': False, 'left': {'is_leaf': False, 'left': {'is_leaf': True, 'prediction': -1, 'splitting_feature': None}, 'prediction': None, 'right': {'is_leaf': True, 'prediction': 1, 'splitting_feature': None}, 'splitting_feature': 'grade.A' }, 'prediction': None, 'right': {'is_leaf': False, 'left': {'is_leaf': True, 'prediction': 1, 'splitting_feature': None}, 'prediction': None, 'right': {'is_leaf': True, 'prediction': -1, 'splitting_feature': None}, 'splitting_feature': 'grade.D' }, 'splitting_feature': 'term. 36 months' } End of explanation """ def classify(tree, x, annotate = False): # If the node is a leaf node. if tree['is_leaf']: if annotate: print "At leaf, predicting %s" % tree['prediction'] return tree['prediction'] else: # Split on feature. split_feature_value = x[tree['splitting_feature']] if annotate: print "Split on %s = %s" % (tree['splitting_feature'], split_feature_value) if split_feature_value == 0: return classify(tree['left'], x, annotate) else: return classify(tree['right'], x, annotate) """ Explanation: Making predictions with a weighted decision tree We give you a function that classifies one data point. It can also return the probability if you want to play around with that as well. End of explanation """ def evaluate_classification_error(tree, data): # Apply the classify(tree, x) to each row in your data prediction = data.apply(lambda x: classify(tree, x)) # Once you've made the predictions, calculate the classification error return (prediction != data[target]).sum() / float(len(data)) evaluate_classification_error(small_data_decision_tree, test_data) """ Explanation: Evaluating the tree Now, we will write a function to evaluate a decision tree by computing the classification error of the tree on the given dataset. 
Again, recall that the classification error is defined as follows: $$ \mbox{classification error} = \frac{\mbox{# mistakes}}{\mbox{# all data points}} $$ The function called evaluate_classification_error takes in as input: 1. tree (as described above) 2. data (an SFrame) The function does not change because of adding data point weights. End of explanation """ # Assign weights example_data_weights = graphlab.SArray([1.] * 10 + [0.]*(len(train_data) - 20) + [1.] * 10) # Train a weighted decision tree model. small_data_decision_tree_subset_20 = weighted_decision_tree_create(train_data, features, target, example_data_weights, max_depth=2) """ Explanation: Example: Training a weighted decision tree To build intuition on how weighted data points affect the tree being built, consider the following: Suppose we only care about making good predictions for the first 10 and last 10 items in train_data, we assign weights: * 1 to the last 10 items * 1 to the first 10 items * and 0 to the rest. Let us fit a weighted decision tree with max_depth = 2. End of explanation """ subset_20 = train_data.head(10).append(train_data.tail(10)) evaluate_classification_error(small_data_decision_tree_subset_20, subset_20) """ Explanation: Now, we will compute the classification error on the subset_20, i.e. the subset of data points whose weight is 1 (namely the first and last 10 data points). 
End of explanation
"""
# Rebuild the 20-point subset that actually carried weight during training
# (the first 10 and last 10 rows of train_data) and measure error there.
subset_20 = train_data.head(10).append(train_data.tail(10))
evaluate_classification_error(small_data_decision_tree_subset_20, subset_20)
""" Explanation: Now, let us compare the classification error of the model small_data_decision_tree_subset_20 on the entire test set train_data:
End of explanation
"""
# For contrast, evaluate the same tree on all of train_data, including the
# zero-weight rows that training effectively ignored.
evaluate_classification_error(small_data_decision_tree_subset_20, train_data)
""" Explanation: The model small_data_decision_tree_subset_20 performs a lot better on subset_20 than on train_data. So, what does this mean? * The points with higher weights are the ones that are more important during the training process of the weighted decision tree.
* The points with zero weights are basically ignored during training. Quiz Question: Will you get the same model as small_data_decision_tree_subset_20 if you trained a decision tree with only the 20 data points with non-zero weights from the set of points in subset_20? Implementing your own Adaboost (on decision stumps) Now that we have a weighted decision tree working, it takes only a bit of work to implement Adaboost. For the sake of simplicity, let us stick with decision tree stumps by training trees with max_depth=1. Recall from the lecture the procedure for Adaboost: 1. Start with unweighted data with $\alpha_j = 1$ 2. For t = 1,...T: * Learn $f_t(x)$ with data weights $\alpha_j$ * Compute coefficient $\hat{w}t$: $$\hat{w}_t = \frac{1}{2}\ln{\left(\frac{1- \mbox{E}(\mathbf{\alpha}, \mathbf{\hat{y}})}{\mbox{E}(\mathbf{\alpha}, \mathbf{\hat{y}})}\right)}$$ * Re-compute weights $\alpha_j$: $$\alpha_j \gets \begin{cases} \alpha_j \exp{(-\hat{w}_t)} & \text{ if }f_t(x_j) = y_j\ \alpha_j \exp{(\hat{w}_t)} & \text{ if }f_t(x_j) \neq y_j \end{cases}$$ * Normalize weights $\alpha_j$: $$\alpha_j \gets \frac{\alpha_j}{\sum{i=1}^{N}{\alpha_i}} $$ Complete the skeleton for the following code to implement adaboost_with_tree_stumps. Fill in the places with YOUR CODE HERE. End of explanation """ stump_weights, tree_stumps = adaboost_with_tree_stumps(train_data, features, target, num_tree_stumps=2) def print_stump(tree): split_name = tree['splitting_feature'] # split_name is something like 'term. 
36 months' if split_name is None: print "(leaf, label: %s)" % tree['prediction'] return None split_feature, split_value = split_name.split('.') print ' root' print ' |---------------|----------------|' print ' | |' print ' | |' print ' | |' print ' [{0} == 0]{1}[{0} == 1] '.format(split_name, ' '*(27-len(split_name))) print ' | |' print ' | |' print ' | |' print ' (%s) (%s)' \ % (('leaf, label: ' + str(tree['left']['prediction']) if tree['left']['is_leaf'] else 'subtree'), ('leaf, label: ' + str(tree['right']['prediction']) if tree['right']['is_leaf'] else 'subtree')) """ Explanation: Checking your Adaboost code Train an ensemble of two tree stumps and see which features those stumps split on. We will run the algorithm with the following parameters: * train_data * features * target * num_tree_stumps = 2 End of explanation """ print_stump(tree_stumps[0]) """ Explanation: Here is what the first stump looks like: End of explanation """ print_stump(tree_stumps[1]) print stump_weights """ Explanation: Here is what the next stump looks like: End of explanation """ stump_weights, tree_stumps = adaboost_with_tree_stumps(train_data, features, target, num_tree_stumps=10) """ Explanation: If your Adaboost is correctly implemented, the following things should be true: tree_stumps[0] should split on term. 36 months with the prediction -1 on the left and +1 on the right. tree_stumps[1] should split on grade.A with the prediction -1 on the left and +1 on the right. Weights should be approximately [0.158, 0.177] Reminders - Stump weights ($\mathbf{\hat{w}}$) and data point weights ($\mathbf{\alpha}$) are two different concepts. - Stump weights ($\mathbf{\hat{w}}$) tell you how important each stump is while making predictions with the entire boosted ensemble. - Data point weights ($\mathbf{\alpha}$) tell you how important each data point is while training a decision stump. Training a boosted ensemble of 10 stumps Let us train an ensemble of 10 decision tree stumps with Adaboost. 
We run the adaboost_with_tree_stumps function with the following parameters: * train_data * features * target * num_tree_stumps = 10 End of explanation """ import numpy as np def predict_adaboost(stump_weights, tree_stumps, data): scores = graphlab.SArray([0.]*len(data)) for i, tree_stump in enumerate(tree_stumps): predictions = data.apply(lambda x: classify(tree_stump, x)) # Accumulate predictions on scores array # YOUR CODE HERE scores += (stump_weights[i] * predictions) return scores.apply(lambda score : +1 if score > 0 else -1) predictions = predict_adaboost(stump_weights, tree_stumps, test_data) accuracy = graphlab.evaluation.accuracy(test_data[target], predictions) print 'Accuracy of 10-component ensemble = %s' % accuracy """ Explanation: Making predictions Recall from the lecture that in order to make predictions, we use the following formula: $$ \hat{y} = sign\left(\sum_{t=1}^T \hat{w}_t f_t(x)\right) $$ We need to do the following things: - Compute the predictions $f_t(x)$ using the $t$-th decision tree - Compute $\hat{w}_t f_t(x)$ by multiplying the stump_weights with the predictions $f_t(x)$ from the decision trees - Sum the weighted predictions over each stump in the ensemble. Complete the following skeleton for making predictions: End of explanation """ stump_weights """ Explanation: Now, let us take a quick look what the stump_weights look like at the end of each iteration of the 10-stump ensemble: End of explanation """ # this may take a while... stump_weights, tree_stumps = adaboost_with_tree_stumps(train_data, features, target, num_tree_stumps=30) """ Explanation: Quiz Question: Are the weights monotonically decreasing, monotonically increasing, or neither? Reminder: Stump weights ($\mathbf{\hat{w}}$) tell you how important each stump is while making predictions with the entire boosted ensemble. Performance plots In this section, we will try to reproduce some of the performance plots dicussed in the lecture. 
How does accuracy change with adding stumps to the ensemble? We will now train an ensemble with: * train_data * features * target * num_tree_stumps = 30 Once we are done with this, we will then do the following: * Compute the classification error at the end of each iteration. * Plot a curve of classification error vs iteration. First, lets train the model. End of explanation """ error_all = [] for n in xrange(1, 31): predictions = predict_adaboost(stump_weights[:n], tree_stumps[:n], train_data) error = 1.0 - graphlab.evaluation.accuracy(train_data[target], predictions) error_all.append(error) print "Iteration %s, training error = %s" % (n, error_all[n-1]) """ Explanation: Computing training error at the end of each iteration Now, we will compute the classification error on the train_data and see how it is reduced as trees are added. End of explanation """ plt.rcParams['figure.figsize'] = 7, 5 plt.plot(range(1,31), error_all, '-', linewidth=4.0, label='Training error') plt.title('Performance of Adaboost ensemble') plt.xlabel('# of iterations') plt.ylabel('Classification error') plt.legend(loc='best', prop={'size':15}) plt.rcParams.update({'font.size': 16}) """ Explanation: Visualizing training error vs number of iterations We have provided you with a simple code snippet that plots classification error with the number of iterations. End of explanation """ test_error_all = [] for n in xrange(1, 31): predictions = predict_adaboost(stump_weights[:n], tree_stumps[:n], test_data) error = 1.0 - graphlab.evaluation.accuracy(test_data[target], predictions) test_error_all.append(error) print "Iteration %s, test error = %s" % (n, test_error_all[n-1]) """ Explanation: Quiz Question: Which of the following best describes a general trend in accuracy as we add more and more components? Answer based on the 30 components learned so far. Training error goes down monotonically, i.e. the training error reduces with each iteration but never increases. 
Training error goes down in general, with some ups and downs in the middle. Training error goes up in general, with some ups and downs in the middle. Training error goes down in the beginning, achieves the best error, and then goes up sharply. None of the above Evaluation on the test data Performing well on the training data is cheating, so lets make sure it works on the test_data as well. Here, we will compute the classification error on the test_data at the end of each iteration. End of explanation """ plt.rcParams['figure.figsize'] = 7, 5 plt.plot(range(1,31), error_all, '-', linewidth=4.0, label='Training error') plt.plot(range(1,31), test_error_all, '-', linewidth=4.0, label='Test error') plt.title('Performance of Adaboost ensemble') plt.xlabel('# of iterations') plt.ylabel('Classification error') plt.rcParams.update({'font.size': 16}) plt.legend(loc='best', prop={'size':15}) plt.tight_layout() """ Explanation: Visualize both the training and test errors Now, let us plot the training & test error with the number of iterations. End of explanation """
kdmurray91/kwip-experiments
bifurcating/TreeSimulation.ipynb
mit
import utils import gzip import random import string import math import ete3 as ete from skbio import Alignment, DNA, DistanceMatrix from skbio.tree import nj import numpy as np import skbio import sys seed = 1003 genome_size = 1 # mbp num_samples = 8 num_runs = 3 mean_n_reads = 5e5 sd_n_reads = mean_n_reads * 0.1 # Coeff of Var = 0.1 min_n_reads = mean_n_reads / 100.0 countgraph_size = 1e8 """ Explanation: Asymmetric Tree Dataset This dataset intends to create a biforcating tree with 8 samples, three runs per sample. End of explanation """ ! rm -rf data ! mkdir data for subdir in ['genomes', 'fastq', 'countgraphs']: ! mkdir data/{subdir} """ Explanation: Don't edit below here Constants are all in the cell above this End of explanation """ random.seed(seed) utils.random.seed(seed) """ Explanation: Set a random seed and seed the RNG End of explanation """ tree = '(((A:0.1,B:0.1):0.3,C:0.2):0.1,((D:0.4, (E:0.2,F:0.15):0.3):0.1,(G:0.2,H:0.15):0.2):0.6);' print(ete.Tree(tree)) with open("data/asym_sample_truth.nwk", 'w') as fh: print(tree, file=fh) """ Explanation: Generate sample genomes First, we make a tree with the following structure End of explanation """ seqgen = 'seq-gen -mGTR -s0.01 -l{len} < data/asym_sample_truth.nwk >data/asym_genomes.phy'.format(len=int(genome_size*1e6)) utils.run_cmd(seqgen) """ Explanation: Make genome sequences with seq-gen Using the GTR model of sequence evolution End of explanation """ seqs = [] with open("data/asym_genomes.phy") as fh: next(fh) # nuke first line for line in fh: name, seq = line.strip().split() seqs.append(skbio.Sequence(seq, {'id': name})) aln = skbio.Alignment(seqs) """ Explanation: Make a random genome, and samples derived from it. Write it to a fasta file. 
End of explanation """ distmat = aln.distances() distmat """ Explanation: Make NJ tree End of explanation """ distmat_reps = DistanceMatrix( np.repeat(np.repeat(distmat.data,num_runs, axis=1), num_runs, axis=0)) run_names = ['{}-{}'.format(g, i) for g in distmat.ids for i in range(num_runs)] distmat_reps.ids = run_names distmat_reps tree_reps = nj(distmat_reps) tree_reps.write('data/asym_runs.nwk') """ Explanation: Make a repeated version of this distance matrix, which can be directly (visually) compared with the result of kWIP, which will of course be of the runs, not the genomes themselves. End of explanation """ runs = {} run_read_counts = {} for seq in seqs: genome = seq.metadata['id'] print('Genome', genome, end=', reps: ') runs[genome] = [] # write genome fas = 'data/genomes/asym_{}.fasta'.format(genome) seq.write(fas, format='fasta') # create each run's reads for j in range(num_runs): print(j, end=' ') sys.stdout.flush() run = '{}-{}'.format(genome, j) fq = "data/fastq/asym_{}_il.fq".format(run) n_reads = max(int(random.gauss(mean_n_reads, sd_n_reads)), min_n_reads) utils.wgsim(n_reads, fas, fq) runs[genome].append(fq) run_read_counts[run] = n_reads print() """ Explanation: Generate reads End of explanation """ def countgraph(fq, cg, x=1e9, k=20, n=1, quiet=True): lic = "load-into-countgraph.py -T 12 -N {N} -k {k} -x {x} -s tsv -b {cg} {fq}".format( N=n, k=k, x=x, cg=cg, fq=fq) print(lic) sys.stdout.flush() utils.run_cmd(lic, quiet) countgraphs = [] for genome in runs: for i, fq in enumerate(runs[genome]): cg = 'data/countgraphs/bifork_{}-{}.cg.gz'.format(genome, i) countgraphs.append(cg) countgraph(fq, cg, x=countgraph_size, k=20) """ Explanation: Hash samples End of explanation """ def kwip(countgraphs, dist, kern='', weighted=True, quiet=True): if kern: kern = '-k {kern}'.format(kern=kern) unweight = '' if not weighted: unweight = '-U' cgs = ' '.join(countgraphs) cmd = "kwip {kern} {wht} -d {dist} {cgs}".format(wht=unweight, kern=kern, dist=dist, cgs=cgs) 
print(cmd) utils.run_cmd(cmd, quiet) kwip(sorted(countgraphs), 'data/asym-kwip.dist', 'data/asym-kwip.kern') kwip(sorted(countgraphs), 'data/asym-ip.dist', 'data/asym-ip.kern', weighted=False) """ Explanation: Run kWIP End of explanation """ from skbio import DistanceMatrix from skbio.tree import nj """ Explanation: Analyse the output End of explanation """ kwip_dist = DistanceMatrix.read("data/asym-kwip.dist") ip_dist = DistanceMatrix.read("data/asym-ip.dist") kwip_dist.ids = run_names ip_dist.ids = run_names kwip_dist.plot(title="kWIP Distances") distmat_reps.ids = run_names distmat_reps.plot(title='True Distances') ip_dist.plot(title='Unweighted kWIP Distance') print() # kwip_tree = nj(kwip_dist) kwip_tree.write('data/asym_kwip.nwk') ip_tree = nj(ip_dist) ip_tree.write('data/asym_ip.nwk') """ Explanation: Make a tree from kWIP's output End of explanation """ true_tree = ete.Tree("data/asym_runs.nwk") print(true_tree) kwip_tree = ete.Tree("data/asym_kwip.nwk") print(kwip_tree) ip_tree = ete.Tree("data/asym_ip.nwk") print(ip_tree) """ Explanation: Robinson-Foulds distance A measure of tree concordance. Smaller is better End of explanation """ print('kWIP:', kwip_tree.robinson_foulds(true_tree, unrooted_trees=True)[0]) print('Unweighted:', ip_tree.robinson_foulds(true_tree, unrooted_trees=True)[0]) """ Explanation: And the RF distance is.... End of explanation """ from scipy.cluster import hierarchy as hier import matplotlib.pyplot as plt %matplotlib inline z = hier.linkage(kwip_dist.condensed_form(), method='complete') plt.figure(figsize=(10, 10)) x = hier.dendrogram(z, labels=run_names) """ Explanation: Hierarchical clustering And ploting by matplotlib End of explanation """
wallinm1/kaggle-facebook-bot
facebook_notebook.ipynb
mit
import pandas as pd import re import gc import numpy as np from scipy import sparse from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.preprocessing import MinMaxScaler from sklearn.feature_selection import SelectPercentile, chi2 from sklearn.externals import joblib import xgboost as xgb """ Explanation: Introduction In this Jupyter notebook, I present my approach to the recent Kaggle competition, Facebook Recruiting IV: Human or Robot?. The main idea was to treat the modeling problem as somewhat of a text classification problem by sorting the bid data for each bidder in chronological order and using this sorted bid data as a text-based "fingerprint" of the bidder's activities. In addition to this, I computed some numerical features for the time differences between bids and the unique counts of the different entries. The final model ended up being a bag of 15 XGBoost models on a sparse matrix of tfidf-vectorized text features concatenated with scaled numerical features. Libraries For the most part, I used Python standard libraries and the scientific Python libraries available in the Anaconda distribution (pandas, scikit-learn, scipy and numpy). The only slightly more exotic library is XGBoost. For installing XGBoost on Windows, these instructions by Alejandro Simkievich and this repository on XGBoost's Github profile were helpful. End of explanation """ df_bids = pd.read_csv('data/bids.csv') """ Explanation: Feature engineering First, we read the bid data: End of explanation """ df_bids = df_bids.replace({' ': ''}, regex = True) #remove spaces """ Explanation: Eventually, we will tokenize the bid information on spaces, so we remove any additional spaces from the data. End of explanation """ df_bids_sorted = df_bids.sort(['bidder_id', 'time'], ascending = [True, True]) """ Explanation: A key part of the approach is dealing with the bids in chronological order. Hence, we sort the bids in ascending order of bidder_ids and time. 
End of explanation """ #dataframe for aggregated bid data bids = pd.DataFrame(data = df_bids_sorted['bidder_id'].unique(), columns = ['bidder_id'], index = df_bids_sorted['bidder_id'].unique()) """ Explanation: Then we initialize a bids-dataframe where the aggregated bid information for each bidder_id will be gathered. End of explanation """ #auction counts counts = df_bids_sorted.groupby('bidder_id')['bidder_id'].agg('count') bids['auction_count_num'] = counts """ Explanation: As a first entry into the bids-dataframe, we count the number of auctions for each bidder_id. End of explanation """ timediff = df_bids_sorted.groupby('bidder_id')['time'].diff() timediff_str = timediff.astype(str).fillna('') df_bids_sorted['timediff_num'] = timediff df_bids_sorted['timediff'] = timediff_str """ Explanation: Then, we compute the time differences between bids into the df_bids_sorted-dataframe. These time differences are included in both numeric and string form. I noticed that there were some time differences that occur quite frequently, and a text processing of time differences should be able to identify these types of patterns in the data. End of explanation """ #turn feature sequences into text text_cols = ['auction', 'merchandise', 'device', 'timediff', 'country', 'ip', 'url'] for var in text_cols: df_bids_sorted[var] = var + "_" + df_bids_sorted[var].fillna("") text_str = var + '_text' count_str = var + '_nunique_num' bids[text_str] = df_bids_sorted.groupby('bidder_id')[var].apply(lambda x: "%s" % ' '.join(x)) bids[count_str] = df_bids_sorted.groupby('bidder_id')[var].nunique() """ Explanation: In the following, the main aggregation step for the bid data is performed. For each column, the data is first converted into slightly more readable form. For example for urls the data is transformed from the form 0esea7scvgr82he to the form url_0esea7scvgr82he. 
This was done to make the different entries more identifiable in the case of evaluating feature importances, but in the end this was not utilized to any significant extent. The entries for each bidder were concatenated with a space-delimiter to generate the aggregated text data. The result is a long string of space-delimited entries for each column. E.g. for the device-column, we can have a string of the type device_phone167 device_phone172 device_phone167 etc. In addition to generating the concatenated text data, the number of unique entries was also computed for each column and bidder. Throughout this notebook, we add a _text-suffix to text columns and a _num-suffix to numerical columns. This allows for the two types of columns to be selected with regular expressions later on. End of explanation """ max_time = df_bids_sorted.groupby('bidder_id')['time'].max() bids['maxtime_num'] = max_time min_time = df_bids_sorted.groupby('bidder_id')['time'].min() bids['mintime_num'] = min_time max_diff = df_bids_sorted.groupby('bidder_id')['timediff_num'].max() max_diff = max_diff.fillna(max_diff.mean()) bids['maxdiff_num'] = max_diff min_diff = df_bids_sorted.groupby('bidder_id')['timediff_num'].max() min_diff = min_diff.fillna(min_diff.mean()) bids['mindiff_num'] = min_diff range_diff = max_diff - min_diff bids['rangediff_num'] = range_diff mean_diff = df_bids_sorted.groupby('bidder_id')['timediff_num'].mean() mean_diff = mean_diff.fillna(mean_diff.mean()) bids['meandiff_num'] = mean_diff median_diff = df_bids_sorted.groupby('bidder_id')['timediff_num'].median() median_diff = median_diff.fillna(median_diff.mean()) bids['mediandiff_num'] = median_diff for q in [0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9]: q_string = 'diff_quantile_num_' + str(q).replace('.', '_') q_temp = df_bids_sorted.groupby('bidder_id')['timediff_num'].quantile(q) q_temp = q_temp.fillna(q_temp.mean()) bids[q_string] = q_temp """ Explanation: One idea I had was that the distribution of time differences between 
bids could be a significant predictor of bot activity. Hence, I computed a number of different descriptive statistics related to the times and time differences. In the following, the min and max times and time differences are computed along with the time difference range, mean, median and the first to ninth deciles. End of explanation """ df_train = pd.read_csv('data/train.csv') df_test = pd.read_csv('data/test.csv') df_combo = df_train.append(df_test) df_combo['address_text'] = 'address_' + df_combo['address'].fillna('') df_combo['account_text'] = 'account_' + df_combo['payment_account'].fillna('') df_combo = df_combo.merge(bids, how = 'left', left_on = ['bidder_id'], right_on = ['bidder_id']) """ Explanation: This completes the bid aggregation step. After this, we read the train- and test-data and merge it with the bid data. End of explanation """ del df_train del df_test del df_bids del df_bids_sorted del bids gc.collect(); """ Explanation: We delete the redundant dataframes and run garbage collection. End of explanation """ num_cols = filter(re.compile('num').search, df_combo.columns) text_cols = filter(re.compile('text').search, df_combo.columns) for col in num_cols: df_combo[col] = df_combo[col].fillna(df_combo[col].mean()) for col in text_cols: df_combo[col] = df_combo[col].fillna('') """ Explanation: Using regular expressions, the text and numeric columns are identified. For missing values in numeric columns, we fill in the column mean; for missing values in text columns, we fill in an empty string. End of explanation """ sample = pd.read_csv('submissions/sampleSubmission.csv') test_dat = df_combo[df_combo.bidder_id.isin(sample.bidder_id)] #test print (sample.bidder_id.values==test_dat['bidder_id'].values).all() """ Explanation: Now, we split the df_combo-dataframe into train and test data. First, we grab the columns for the test data and check that the order matches that in the sample submission. 
End of explanation """ train_dat = df_combo[~pd.isnull(df_combo.outcome)] y = train_dat.outcome.values xtrain = train_dat[num_cols].values xtest = test_dat[num_cols].values col_names = num_cols """ Explanation: We put the numeric columns into matrices xtrain and xtest. The processed sparse text frequency matrices will then be concatenated to them. We will also keep track of the feature names for model interpretation. End of explanation """ sc = MinMaxScaler().fit(np.vstack((xtrain, xtest))) xtrain = sc.transform(xtrain) xtest = sc.transform(xtest) """ Explanation: The data is scaled using scikit-learn's MinMaxScaler. We use the MinMaxScaler as it leads to non-negative values, which is useful later on as scikit-learn's chi2-feature selection function only works on non-negative data. End of explanation """ xtrain = sparse.csr_matrix(xtrain) xtest = sparse.csr_matrix(xtest) """ Explanation: As a final step before text processing, the numeric xtrain- and xtest-matrices are converted into sparse matrices to prepare for the concatenation with tfidf-matrices later on. End of explanation """ def tokens(x): return x.split(' ') """ Explanation: Text processing The text columns are processed using the tfidf-vectorizer in scikit-learn. 
First we define a custom tokenizer-function that only split on spaces: End of explanation """ text_params = {} text_params['address_text'] = {'include':False} text_params['account_text'] = {'include':False} text_params['auction_text'] = {'include':True, 'mindf':5, 'ngram':(1,3), 'token':'tokens'} text_params['merchandise_text'] = {'include':True, 'mindf':5, 'ngram':(1,3), 'token':'tokens'} text_params['device_text'] = {'include':True, 'mindf':5, 'ngram':(1,3), 'token':'tokens'} text_params['timediff_text'] = {'include':True, 'mindf':1, 'ngram':(1,1), 'token':'tokens'} text_params['country_text'] = {'include':True, 'mindf':5, 'ngram':(1,3), 'token':'tokens'} text_params['ip_text'] = {'include':True, 'mindf':1, 'ngram':(1,1), 'token':'nottokens'} text_params['url_text'] = {'include':True, 'mindf':5, 'ngram':(1,3), 'token':'tokens'} """ Explanation: We use slightly different tfidf-parameters for the different columns. These are stored in a text_params-dictionary. End of explanation """ for col in text_cols: if not text_params[col]['include']: continue else: if text_params[col]['token'] == 'tokens': vect = TfidfVectorizer(tokenizer = tokens, min_df = text_params[col]['mindf'], ngram_range = text_params[col]['ngram']) else: vect = TfidfVectorizer(min_df = text_params[col]['mindf'], ngram_range = text_params[col]['ngram']) documents = df_combo[col].values vect.fit(documents) col_names = col_names + vect.get_feature_names() xtr_tmp = vect.transform(train_dat[col].values) xte_tmp = vect.transform(test_dat[col].values) xtrain = sparse.hstack((xtrain, xtr_tmp)) xtest = sparse.hstack((xtest, xte_tmp)) """ Explanation: The parameter choices were based on some brief univariate tests of auc-performance. The address and account-fields were not found to be very useful and were dropped from the data. For all columns except for the timediff, the ngrams parameter is (1,3), i.e. we also consider bigrams and trigrams in addition to single tokens. 
For these columns, we should thereby be able to identify some sequential structure in the bid data. Interestingly, the ip_text-column had an improved auc when using the default tfidf-tokenizer instead of our custom tokens-tokenizer. The default-tokenizer also splits on punctuation and thereby splits the ip into its sub-addresses, which turned out to be useful for this modeling problem. For the other text columns, we use the space-delimited tokens-tokenization. With the following code snippet, the text columns were vectorized and concatenated to the xtrain- and xtest-matrices: End of explanation """ joblib.dump(xtrain, 'data/xtrain.pkl', compress = 3); joblib.dump(y, 'data/y.pkl', compress = 3); """ Explanation: Finally, we pickle and save the xtrain-matrix and the y-vector. These objects are required for running the grid search script hyperopt_xgb.py. End of explanation """ #feature selection feats_25 = SelectPercentile(chi2, 25).fit(xtrain, y) xtrain = feats_25.transform(xtrain) xtest = feats_25.transform(xtest) clf = xgb.XGBClassifier(objective = 'binary:logistic', learning_rate = 0.05, max_depth = 5, nthread = 8, seed = 42, subsample = 0.4, colsample_bytree = 0.7, min_child_weight = 1, n_estimators = 100, gamma = 0.15, silent = True) #bag of 15 models rounds = 15 preds_mat = np.zeros((len(sample.index), rounds)) for i in range(rounds): clf.set_params(seed = i + 1) clf.fit(xtrain, y) preds_tmp = clf.predict_proba(xtest)[:, 1] preds_mat[:, i] = preds_tmp bagged_preds = preds_mat.mean(axis = 1) sample.prediction = bagged_preds sample.to_csv('submissions/facebook_submission.csv', index = False) """ Explanation: Model For the final model, we perform a univariate chi2-feature selection to choose the top-25% of features. Then we fit 15 XGBoost-models to the data, where the only difference between the bagged models is the random seed. For the final submission, we take the average of the 15 models in the bag. 
End of explanation """ col_names = np.array(col_names)[feats_25.get_support()] """ Explanation: This submission should score around 0.93698 on the private leaderboard for a rank of 18th. The hyperparameters were determined through a grid search. In the Github-repository, there is a script called hyperopt_xgb.py that shows how this type of search could be performed. Model interpretation To end this notebook, a brief investigation of the feature importances of our model will be performed. To access the feature importances of an xgboost-model, we have to use the default python interface instead of the scikit-learn interface we have used so far. First, we exclude all the features that were not selected by our univariate feature selector feats_25: End of explanation """ xgb_params = {'objective': 'binary:logistic', 'eta': 0.05, 'max_depth': 5, 'seed': 42, 'subsample': 0.4, 'colsample_bytree': 0.7, 'min_child_weight': 1, 'gamma': 0.15} num_round = 100 dtrain = xgb.DMatrix(xtrain, label = y) booster = xgb.train(xgb_params, dtrain, num_round) """ Explanation: Then we fit an xgboost-model using the standard python interface: End of explanation """ importance = booster.get_fscore() df_imp = pd.DataFrame(columns = ('feature', 'importance')) sum_imp = 0 for imp in importance: row = col_names[int(imp[1:])], importance[imp] df_imp.loc[len(df_imp.index)] = row sum_imp += importance[imp] df_imp['relative_importance'] = df_imp.importance/float(sum_imp) df_imp.drop(['importance'], 1, inplace = True) df_imp.sort(columns=['relative_importance'], ascending = False, inplace = True) df_imp.index = df_imp.feature """ Explanation: Then we grab the feature importances from the booster and place them in a dataframe: End of explanation """ pd.options.mode.chained_assignment=None df_plot = df_imp[:20] names = {'country':'Country', 'num': 'Numeric', 'timediff': 'Time difference', 'device':'Device', 'url':'URL', 'ip':'IP address'} for pattern in sorted(names.keys()): df_plot[names[pattern]] = 0 
if pattern == 'ip': rows = df_plot.feature.str.isnumeric() elif pattern == 'timediff': rows = df_plot.feature.str.contains(pattern) & ~df_plot.feature.str.contains('num') else: rows = df_plot.feature.str.contains(pattern) df_plot.loc[rows, names[pattern]] = df_plot.loc[rows, 'relative_importance'] df_plot = df_plot.drop(['feature', 'relative_importance'], 1) """ Explanation: We do some further processing of the df_imp-dataframe to get the dataframe ready for plotting. We will plot only the 20 most significant features. End of explanation """ import matplotlib.pyplot as plt import matplotlib %matplotlib inline %config InlineBackend.figure_format = 'svg' matplotlib.style.use('ggplot') matplotlib.rcParams.update({'font.size': 13}) matplotlib.rcParams['figure.figsize'] = 9, 6 ax = df_plot.plot(kind='barh', stacked = True) ax.invert_yaxis() plt.xlabel('Relative importance') plt.ylabel('Feature'); """ Explanation: Finally, we plot the feature importances: End of explanation """
fastai/course-v3
nbs/dl2/01_matmul.ipynb
apache-2.0
%load_ext autoreload %autoreload 2 %matplotlib inline """ Explanation: Matrix multiplication from foundations The foundations we'll assume throughout this course are: Python Python modules (non-DL) pytorch indexable tensor, and tensor creation (including RNGs - random number generators) fastai.datasets Check imports End of explanation """ #export from exp.nb_00 import * import operator def test(a,b,cmp,cname=None): if cname is None: cname=cmp.__name__ assert cmp(a,b),f"{cname}:\n{a}\n{b}" def test_eq(a,b): test(a,b,operator.eq,'==') test_eq(TEST,'test') # To run tests in console: # ! python run_notebook.py 01_matmul.ipynb """ Explanation: Jump_to lesson 8 video End of explanation """ #export from pathlib import Path from IPython.core.debugger import set_trace from fastai import datasets import pickle, gzip, math, torch, matplotlib as mpl import matplotlib.pyplot as plt from torch import tensor MNIST_URL='http://deeplearning.net/data/mnist/mnist.pkl' path = datasets.download_data(MNIST_URL, ext='.gz'); path with gzip.open(path, 'rb') as f: ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1') x_train,y_train,x_valid,y_valid = map(tensor, (x_train,y_train,x_valid,y_valid)) n,c = x_train.shape x_train, x_train.shape, y_train, y_train.shape, y_train.min(), y_train.max() assert n==y_train.shape[0]==50000 test_eq(c,28*28) test_eq(y_train.min(),0) test_eq(y_train.max(),9) mpl.rcParams['image.cmap'] = 'gray' img = x_train[0] img.view(28,28).type() plt.imshow(img.view((28,28))); """ Explanation: Get data Jump_to lesson 8 video End of explanation """ weights = torch.randn(784,10) bias = torch.zeros(10) """ Explanation: Initial python model Jump_to lesson 8 video Jump_to lesson 8 video End of explanation """ def matmul(a,b): ar,ac = a.shape # n_rows * n_cols br,bc = b.shape assert ac==br c = torch.zeros(ar, bc) for i in range(ar): for j in range(bc): for k in range(ac): # or br c[i,j] += a[i,k] * b[k,j] return c m1 = x_valid[:5] m2 = weights 
m1.shape,m2.shape %time t1=matmul(m1, m2) t1.shape """ Explanation: Matrix multiplication End of explanation """ len(x_train) """ Explanation: This is kinda slow - what if we could speed it up by 50,000 times? Let's try! End of explanation """ a = tensor([10., 6, -4]) b = tensor([2., 8, 7]) a,b a + b (a < b).float().mean() m = tensor([[1., 2, 3], [4,5,6], [7,8,9]]); m """ Explanation: Elementwise ops Operators (+,-,*,/,>,<,==) are usually element-wise. Examples of element-wise operations: Jump_to lesson 8 video End of explanation """ (m*m).sum().sqrt() """ Explanation: Frobenius norm: $$\| A \|F = \left( \sum{i,j=1}^n | a_{ij} |^2 \right)^{1/2}$$ Hint: you don't normally need to write equations in LaTeX yourself, instead, you can click 'edit' in Wikipedia and copy the LaTeX from there (which is what I did for the above equation). Or on arxiv.org, click "Download: Other formats" in the top right, then "Download source"; rename the downloaded file to end in .tgz if it doesn't already, and you should find the source there, including the equations to copy and paste. End of explanation """ def matmul(a,b): ar,ac = a.shape br,bc = b.shape assert ac==br c = torch.zeros(ar, bc) for i in range(ar): for j in range(bc): # Any trailing ",:" can be removed c[i,j] = (a[i,:] * b[:,j]).sum() return c %timeit -n 10 _=matmul(m1, m2) 890.1/5 #export def near(a,b): return torch.allclose(a, b, rtol=1e-3, atol=1e-5) def test_near(a,b): test(a,b,near) test_near(t1,matmul(m1, m2)) """ Explanation: Elementwise matmul End of explanation """ a a > 0 """ Explanation: Broadcasting The term broadcasting describes how arrays with different shapes are treated during arithmetic operations. The term broadcasting was first used by Numpy. From the Numpy Documentation: The term broadcasting describes how numpy treats arrays with different shapes during arithmetic operations. Subject to certain constraints, the smaller array is “broadcast” across the larger array so that they have compatible shapes. 
Broadcasting provides a means of vectorizing array operations so that looping occurs in C instead of Python. It does this without making needless copies of data and usually leads to efficient algorithm implementations. In addition to the efficiency of broadcasting, it allows developers to write less code, which typically leads to fewer errors. This section was adapted from Chapter 4 of the fast.ai Computational Linear Algebra course. Jump_to lesson 8 video Broadcasting with a scalar End of explanation """ a + 1 m 2*m """ Explanation: How are we able to do a > 0? 0 is being broadcast to have the same dimensions as a. For instance you can normalize our dataset by subtracting the mean (a scalar) from the entire data set (a matrix) and dividing by the standard deviation (another scalar), using broadcasting. Other examples of broadcasting with a scalar: End of explanation """ c = tensor([10.,20,30]); c m m.shape,c.shape m + c c + m """ Explanation: Broadcasting a vector to a matrix We can also broadcast a vector to a matrix: End of explanation """ t = c.expand_as(m) t m + t t.storage() t.stride(), t.shape """ Explanation: We don't really copy the rows, but it looks as if we did. In fact, the rows are given a stride of 0. End of explanation """ c.unsqueeze(0) c.unsqueeze(1) m c.shape, c.unsqueeze(0).shape,c.unsqueeze(1).shape c.shape, c[None].shape,c[:,None].shape """ Explanation: You can index with the special value [None] or use unsqueeze() to convert a 1-dimensional array into a 2-dimensional array (although one of those dimensions has value 1). End of explanation """ c[None].shape,c[...,None].shape c[:,None].expand_as(m) m + c[:,None] c[:,None] """ Explanation: You can always skip trailling ':'s. And '...' 
means 'all preceding dimensions' End of explanation """ def matmul(a,b): ar,ac = a.shape br,bc = b.shape assert ac==br c = torch.zeros(ar, bc) for i in range(ar): # c[i,j] = (a[i,:] * b[:,j]).sum() # previous c[i] = (a[i ].unsqueeze(-1) * b).sum(dim=0) return c %timeit -n 10 _=matmul(m1, m2) 885000/277 test_near(t1, matmul(m1, m2)) """ Explanation: Matmul with broadcasting End of explanation """ c[None,:] c[None,:].shape c[:,None] c[:,None].shape c[None,:] * c[:,None] c[None] > c[:,None] """ Explanation: Broadcasting Rules End of explanation """ # c[i,j] += a[i,k] * b[k,j] # c[i,j] = (a[i,:] * b[:,j]).sum() def matmul(a,b): return torch.einsum('ik,kj->ij', a, b) %timeit -n 10 _=matmul(m1, m2) 885000/55 test_near(t1, matmul(m1, m2)) """ Explanation: When operating on two arrays/tensors, Numpy/PyTorch compares their shapes element-wise. It starts with the trailing dimensions, and works its way forward. Two dimensions are compatible when they are equal, or one of them is 1, in which case that dimension is broadcasted to make it the same size Arrays do not need to have the same number of dimensions. For example, if you have a 256*256*3 array of RGB values, and you want to scale each color in the image by a different value, you can multiply the image by a one-dimensional array with 3 values. Lining up the sizes of the trailing axes of these arrays according to the broadcast rules, shows that they are compatible: Image (3d array): 256 x 256 x 3 Scale (1d array): 3 Result (3d array): 256 x 256 x 3 The numpy documentation includes several examples of what dimensions can and can not be broadcast together. Einstein summation Einstein summation (einsum) is a compact representation for combining products and sums in a general way. From the numpy docs: "The subscripts string is a comma-separated list of subscript labels, where each label refers to a dimension of the corresponding operand. 
Whenever a label is repeated it is summed, so np.einsum('i,i', a, b) is equivalent to np.inner(a,b). If a label appears only once, it is not summed, so np.einsum('i', a) produces a view of a with no changes." Jump_to lesson 8 video End of explanation """ %timeit -n 10 t2 = m1.matmul(m2) # time comparison vs pure python: 885000/18 t2 = m1@m2 test_near(t1, t2) m1.shape,m2.shape """ Explanation: pytorch op We can use pytorch's function or operator directly for matrix multiplication. Jump_to lesson 8 video End of explanation """ !python notebook2script.py 01_matmul.ipynb """ Explanation: Export End of explanation """
machinelearningnanodegree/stanford-cs231
solutions/levin/assignment2/FullyConnectedNets.ipynb
mit
# As usual, a bit of setup import sys import os sys.path.insert(0, os.path.abspath('..')) import time import numpy as np import matplotlib.pyplot as plt from cs231n.classifiers.fc_net import * from cs231n.data_utils import get_CIFAR10_data from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array from cs231n.solver import Solver %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def rel_error(x, y): """ returns relative error """ return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) # Load the (preprocessed) CIFAR10 data. data = get_CIFAR10_data() for k, v in data.iteritems(): print '%s: ' % k, v.shape """ Explanation: Fully-Connected Neural Nets In the previous homework you implemented a fully-connected two-layer neural network on CIFAR-10. The implementation was simple but not very modular since the loss and gradient were computed in a single monolithic function. This is manageable for a simple two-layer network, but would become impractical as we move to bigger models. Ideally we want to build networks using a more modular design so that we can implement different layer types in isolation and then snap them together into models with different architectures. In this exercise we will implement fully-connected networks using a more modular approach. For each layer we will implement a forward and a backward function. The forward function will receive inputs, weights, and other parameters and will return both an output and a cache object storing data needed for the backward pass, like this: ```python def layer_forward(x, w): """ Receive inputs x and weights w """ # Do some computations ... z = # ... 
some intermediate value # Do some more computations ... out = # the output cache = (x, w, z, out) # Values we need to compute gradients return out, cache ``` The backward pass will receive upstream derivatives and the cache object, and will return gradients with respect to the inputs and weights, like this: ```python def layer_backward(dout, cache): """ Receive derivative of loss with respect to outputs and cache, and compute derivative with respect to inputs. """ # Unpack cache values x, w, z, out = cache # Use values in cache to compute derivatives dx = # Derivative of loss with respect to x dw = # Derivative of loss with respect to w return dx, dw ``` After implementing a bunch of layers this way, we will be able to easily combine them to build classifiers with different architectures. In addition to implementing fully-connected networks of arbitrary depth, we will also explore different update rules for optimization, and introduce Dropout as a regularizer and Batch Normalization as a tool to more efficiently optimize deep networks. End of explanation """ # Test the affine_forward function num_inputs = 2 input_shape = (4, 5, 6) output_dim = 3 input_size = num_inputs * np.prod(input_shape) weight_size = output_dim * np.prod(input_shape) x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape) w = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim) b = np.linspace(-0.3, 0.1, num=output_dim) out, _ = affine_forward(x, w, b) correct_out = np.array([[ 1.49834967, 1.70660132, 1.91485297], [ 3.25553199, 3.5141327, 3.77273342]]) # Compare your output with ours. The error should be around 1e-9. print 'Testing affine_forward function:' print 'difference: ', rel_error(out, correct_out) """ Explanation: Affine layer: foward Open the file cs231n/layers.py and implement the affine_forward function. 
Once you are done you can test your implementaion by running the following: End of explanation """ # Test the affine_backward function x = np.random.randn(10, 2, 3) w = np.random.randn(6, 5) b = np.random.randn(5) dout = np.random.randn(10, 5) dx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout) dw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout) db_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout) _, cache = affine_forward(x, w, b) dx, dw, db = affine_backward(dout, cache) # The error should be around 1e-10 print 'Testing affine_backward function:' print 'dx error: ', rel_error(dx_num, dx) print 'dw error: ', rel_error(dw_num, dw) print 'db error: ', rel_error(db_num, db) """ Explanation: Affine layer: backward Now implement the affine_backward function and test your implementation using numeric gradient checking. End of explanation """ # Test the relu_forward function x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4) out, _ = relu_forward(x) correct_out = np.array([[ 0., 0., 0., 0., ], [ 0., 0., 0.04545455, 0.13636364,], [ 0.22727273, 0.31818182, 0.40909091, 0.5, ]]) # Compare your output with ours. 
The error should be around 1e-8 print 'Testing relu_forward function:' print 'difference: ', rel_error(out, correct_out) """ Explanation: ReLU layer: forward Implement the forward pass for the ReLU activation function in the relu_forward function and test your implementation using the following: End of explanation """ x = np.random.randn(10, 10) dout = np.random.randn(*x.shape) dx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout) _, cache = relu_forward(x) dx = relu_backward(dout, cache) # The error should be around 1e-12 print 'Testing relu_backward function:' print 'dx error: ', rel_error(dx_num, dx) """ Explanation: ReLU layer: backward Now implement the backward pass for the ReLU activation function in the relu_backward function and test your implementation using numeric gradient checking: End of explanation """ from cs231n.layer_utils import affine_relu_forward, affine_relu_backward x = np.random.randn(2, 3, 4) w = np.random.randn(12, 10) b = np.random.randn(10) dout = np.random.randn(2, 10) out, cache = affine_relu_forward(x, w, b) dx, dw, db = affine_relu_backward(dout, cache) dx_num = eval_numerical_gradient_array(lambda x: affine_relu_forward(x, w, b)[0], x, dout) dw_num = eval_numerical_gradient_array(lambda w: affine_relu_forward(x, w, b)[0], w, dout) db_num = eval_numerical_gradient_array(lambda b: affine_relu_forward(x, w, b)[0], b, dout) print 'Testing affine_relu_forward:' print 'dx error: ', rel_error(dx_num, dx) print 'dw error: ', rel_error(dw_num, dw) print 'db error: ', rel_error(db_num, db) """ Explanation: "Sandwich" layers There are some common patterns of layers that are frequently used in neural nets. For example, affine layers are frequently followed by a ReLU nonlinearity. To make these common patterns easy, we define several convenience layers in the file cs231n/layer_utils.py. 
For now take a look at the affine_relu_forward and affine_relu_backward functions, and run the following to numerically gradient check the backward pass: End of explanation """ num_classes, num_inputs = 10, 50 x = 0.001 * np.random.randn(num_inputs, num_classes) y = np.random.randint(num_classes, size=num_inputs) dx_num = eval_numerical_gradient(lambda x: svm_loss(x, y)[0], x, verbose=False) loss, dx = svm_loss(x, y) # Test svm_loss function. Loss should be around 9 and dx error should be 1e-9 print 'Testing svm_loss:' print 'loss: ', loss print 'dx error: ', rel_error(dx_num, dx) dx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, verbose=False) loss, dx = softmax_loss(x, y) # Test softmax_loss function. Loss should be 2.3 and dx error should be 1e-8 print '\nTesting softmax_loss:' print 'loss: ', loss print 'dx error: ', rel_error(dx_num, dx) """ Explanation: Loss layers: Softmax and SVM You implemented these loss functions in the last assignment, so we'll give them to you for free here. You should still make sure you understand how they work by looking at the implementations in cs231n/layers.py. You can make sure that the implementations are correct by running the following: End of explanation """ N, D, H, C = 3, 5, 50, 7 X = np.random.randn(N, D) y = np.random.randint(C, size=N) std = 1e-2 model = TwoLayerNet(input_dim=D, hidden_dim=H, num_classes=C, weight_scale=std) print 'Testing initialization ... ' W1_std = abs(model.params['W1'].std() - std) b1 = model.params['b1'] W2_std = abs(model.params['W2'].std() - std) b2 = model.params['b2'] assert W1_std < std / 10, 'First layer weights do not seem right' assert np.all(b1 == 0), 'First layer biases do not seem right' assert W2_std < std / 10, 'Second layer weights do not seem right' assert np.all(b2 == 0), 'Second layer biases do not seem right' print 'Testing test-time forward pass ... 
' model.params['W1'] = np.linspace(-0.7, 0.3, num=D*H).reshape(D, H) model.params['b1'] = np.linspace(-0.1, 0.9, num=H) model.params['W2'] = np.linspace(-0.3, 0.4, num=H*C).reshape(H, C) model.params['b2'] = np.linspace(-0.9, 0.1, num=C) X = np.linspace(-5.5, 4.5, num=N*D).reshape(D, N).T scores = model.loss(X) correct_scores = np.asarray( [[11.53165108, 12.2917344, 13.05181771, 13.81190102, 14.57198434, 15.33206765, 16.09215096], [12.05769098, 12.74614105, 13.43459113, 14.1230412, 14.81149128, 15.49994135, 16.18839143], [12.58373087, 13.20054771, 13.81736455, 14.43418138, 15.05099822, 15.66781506, 16.2846319 ]]) scores_diff = np.abs(scores - correct_scores).sum() assert scores_diff < 1e-6, 'Problem with test-time forward pass' print 'Testing training loss (no regularization)' y = np.asarray([0, 5, 1]) loss, grads = model.loss(X, y) correct_loss = 3.4702243556 assert abs(loss - correct_loss) < 1e-10, 'Problem with training-time loss' model.reg = 1.0 loss, grads = model.loss(X, y) correct_loss = 26.5948426952 assert abs(loss - correct_loss) < 1e-10, 'Problem with regularization loss' for reg in [0.0, 0.7]: print 'Running numeric gradient check with reg = ', reg model.reg = reg loss, grads = model.loss(X, y) for name in sorted(grads): f = lambda _: model.loss(X, y)[0] grad_num = eval_numerical_gradient(f, model.params[name], verbose=False) print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])) """ Explanation: Two-layer network In the previous assignment you implemented a two-layer neural network in a single monolithic class. Now that you have implemented modular versions of the necessary layers, you will reimplement the two layer network using these modular implementations. Open the file cs231n/classifiers/fc_net.py and complete the implementation of the TwoLayerNet class. This class will serve as a model for the other networks you will implement in this assignment, so read through it to make sure you understand the API. 
You can run the cell below to test your implementation. End of explanation """ # model = TwoLayerNet() # solver = None ############################################################################## # TODO: Use a Solver instance to train a TwoLayerNet that achieves at least # # 50% accuracy on the validation set. # ############################################################################## input_dim=3*32*32 hidden_dim=100 num_classes=10 weight_scale=1e-3 reg=0.0 model = TwoLayerNet(input_dim=input_dim, hidden_dim=hidden_dim, num_classes=num_classes, weight_scale=weight_scale, reg=reg) solver = Solver(model, data, update_rule='sgd', optim_config={ 'learning_rate': 1e-3, }, lr_decay=0.95, num_epochs=10, batch_size=100, print_every=100) solver.train() ############################################################################## # END OF YOUR CODE # ############################################################################## # Run this cell to visualize training loss and train / val accuracy plt.subplot(2, 1, 1) plt.title('Training loss') plt.plot(solver.loss_history, 'o') plt.xlabel('Iteration') plt.subplot(2, 1, 2) plt.title('Accuracy') plt.plot(solver.train_acc_history, '-o', label='train') plt.plot(solver.val_acc_history, '-o', label='val') plt.plot([0.5] * len(solver.val_acc_history), 'k--') plt.xlabel('Epoch') plt.legend(loc='lower right') plt.gcf().set_size_inches(15, 12) plt.show() """ Explanation: Solver In the previous assignment, the logic for training models was coupled to the models themselves. Following a more modular design, for this assignment we have split the logic for training models into a separate class. Open the file cs231n/solver.py and read through it to familiarize yourself with the API. After doing so, use a Solver instance to train a TwoLayerNet that achieves at least 50% accuracy on the validation set. 
End of explanation """ N, D, H1, H2, C = 2, 15, 20, 30, 10 X = np.random.randn(N, D) y = np.random.randint(C, size=(N,)) for reg in [0, 3.14]: print 'Running check with reg = ', reg model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C, reg=reg, weight_scale=5e-2, dtype=np.float64) loss, grads = model.loss(X, y) print 'Initial loss: ', loss for name in sorted(grads): f = lambda _: model.loss(X, y)[0] grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5) print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])) """ Explanation: Multilayer network Next you will implement a fully-connected network with an arbitrary number of hidden layers. Read through the FullyConnectedNet class in the file cs231n/classifiers/fc_net.py. Implement the initialization, the forward pass, and the backward pass. For the moment don't worry about implementing dropout or batch normalization; we will add those features soon. Initial loss and gradient check As a sanity check, run the following to check the initial loss and to gradient check the network both with and without regularization. Do the initial losses seem reasonable? For gradient checking, you should expect to see errors around 1e-6 or less. End of explanation """ # TODO: Use a three-layer Net to overfit 50 training examples. 
num_train = 50 small_data = { 'X_train': data['X_train'][:num_train], 'y_train': data['y_train'][:num_train], 'X_val': data['X_val'], 'y_val': data['y_val'], } learning_rate = 1e-2 weight_scale = 1e-2 model = FullyConnectedNet([100, 100], weight_scale=weight_scale, dtype=np.float64) solver = Solver(model, small_data, print_every=10, num_epochs=20, batch_size=25, update_rule='sgd', optim_config={ 'learning_rate': learning_rate, } ) solver.train() plt.plot(solver.loss_history, 'o') plt.title('Training loss history') plt.xlabel('Iteration') plt.ylabel('Training loss') plt.show() """ Explanation: As another sanity check, make sure you can overfit a small dataset of 50 images. First we will try a three-layer network with 100 units in each hidden layer. You will need to tweak the learning rate and initialization scale, but you should be able to overfit and achieve 100% training accuracy within 20 epochs. End of explanation """ # TODO: Use a five-layer Net to overfit 50 training examples. num_train = 50 small_data = { 'X_train': data['X_train'][:num_train], 'y_train': data['y_train'][:num_train], 'X_val': data['X_val'], 'y_val': data['y_val'], } learning_rate = 1e-2 weight_scale = 6e-2 model = FullyConnectedNet([100, 100, 100, 100], weight_scale=weight_scale, dtype=np.float64) solver = Solver(model, small_data, print_every=10, num_epochs=20, batch_size=25, update_rule='sgd', optim_config={ 'learning_rate': learning_rate, } ) solver.train() plt.plot(solver.loss_history, 'o') plt.title('Training loss history') plt.xlabel('Iteration') plt.ylabel('Training loss') plt.show() """ Explanation: Now try to use a five-layer network with 100 units on each layer to overfit 50 training examples. Again you will have to adjust the learning rate and weight initialization, but you should be able to achieve 100% training accuracy within 20 epochs. 
End of explanation """ from cs231n.optim import sgd_momentum N, D = 4, 5 w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D) dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D) v = np.linspace(0.6, 0.9, num=N*D).reshape(N, D) config = {'learning_rate': 1e-3, 'velocity': v} next_w, _ = sgd_momentum(w, dw, config=config) expected_next_w = np.asarray([ [ 0.1406, 0.20738947, 0.27417895, 0.34096842, 0.40775789], [ 0.47454737, 0.54133684, 0.60812632, 0.67491579, 0.74170526], [ 0.80849474, 0.87528421, 0.94207368, 1.00886316, 1.07565263], [ 1.14244211, 1.20923158, 1.27602105, 1.34281053, 1.4096 ]]) expected_velocity = np.asarray([ [ 0.5406, 0.55475789, 0.56891579, 0.58307368, 0.59723158], [ 0.61138947, 0.62554737, 0.63970526, 0.65386316, 0.66802105], [ 0.68217895, 0.69633684, 0.71049474, 0.72465263, 0.73881053], [ 0.75296842, 0.76712632, 0.78128421, 0.79544211, 0.8096 ]]) print 'next_w error: ', rel_error(next_w, expected_next_w) print 'velocity error: ', rel_error(expected_velocity, config['velocity']) """ Explanation: Inline question: Did you notice anything about the comparative difficulty of training the three-layer net vs training the five layer net? Answer: It's much harder to find the right weight initialization and learning rate for five layer net. As the network grows deeper, we tend to have more dead activations, and thus kill the backward gradient. Update rules So far we have used vanilla stochastic gradient descent (SGD) as our update rule. More sophisticated update rules can make it easier to train deep networks. We will implement a few of the most commonly used update rules and compare them to vanilla SGD. SGD+Momentum Stochastic gradient descent with momentum is a widely used update rule that tends to make deep networks converge faster than vanilla stochstic gradient descent. Open the file cs231n/optim.py and read the documentation at the top of the file to make sure you understand the API. 
Implement the SGD+momentum update rule in the function sgd_momentum and run the following to check your implementation. You should see errors less than 1e-8. End of explanation """ num_train = 4000 small_data = { 'X_train': data['X_train'][:num_train], 'y_train': data['y_train'][:num_train], 'X_val': data['X_val'], 'y_val': data['y_val'], } solvers = {} for update_rule in ['sgd', 'sgd_momentum']: print 'running with ', update_rule model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2) solver = Solver(model, small_data, num_epochs=5, batch_size=100, update_rule=update_rule, optim_config={ 'learning_rate': 1e-2, }, verbose=True) solvers[update_rule] = solver solver.train() print plt.subplot(3, 1, 1) plt.title('Training loss') plt.xlabel('Iteration') plt.subplot(3, 1, 2) plt.title('Training accuracy') plt.xlabel('Epoch') plt.subplot(3, 1, 3) plt.title('Validation accuracy') plt.xlabel('Epoch') for update_rule, solver in solvers.iteritems(): plt.subplot(3, 1, 1) plt.plot(solver.loss_history, 'o', label=update_rule) plt.subplot(3, 1, 2) plt.plot(solver.train_acc_history, '-o', label=update_rule) plt.subplot(3, 1, 3) plt.plot(solver.val_acc_history, '-o', label=update_rule) for i in [1, 2, 3]: plt.subplot(3, 1, i) plt.legend(loc='upper center', ncol=4) plt.gcf().set_size_inches(15, 15) plt.show() """ Explanation: Once you have done so, run the following to train a six-layer network with both SGD and SGD+momentum. You should see the SGD+momentum update rule converge faster. 
End of explanation """ # Test RMSProp implementation; you should see errors less than 1e-7 from cs231n.optim import rmsprop N, D = 4, 5 w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D) dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D) cache = np.linspace(0.6, 0.9, num=N*D).reshape(N, D) config = {'learning_rate': 1e-2, 'cache': cache} next_w, _ = rmsprop(w, dw, config=config) expected_next_w = np.asarray([ [-0.39223849, -0.34037513, -0.28849239, -0.23659121, -0.18467247], [-0.132737, -0.08078555, -0.02881884, 0.02316247, 0.07515774], [ 0.12716641, 0.17918792, 0.23122175, 0.28326742, 0.33532447], [ 0.38739248, 0.43947102, 0.49155973, 0.54365823, 0.59576619]]) expected_cache = np.asarray([ [ 0.5976, 0.6126277, 0.6277108, 0.64284931, 0.65804321], [ 0.67329252, 0.68859723, 0.70395734, 0.71937285, 0.73484377], [ 0.75037008, 0.7659518, 0.78158892, 0.79728144, 0.81302936], [ 0.82883269, 0.84469141, 0.86060554, 0.87657507, 0.8926 ]]) print 'next_w error: ', rel_error(expected_next_w, next_w) print 'cache error: ', rel_error(expected_cache, config['cache']) # Test Adam implementation; you should see errors around 1e-7 or less from cs231n.optim import adam N, D = 4, 5 w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D) dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D) m = np.linspace(0.6, 0.9, num=N*D).reshape(N, D) v = np.linspace(0.7, 0.5, num=N*D).reshape(N, D) config = {'learning_rate': 1e-2, 'm': m, 'v': v, 't': 5} next_w, _ = adam(w, dw, config=config) expected_next_w = np.asarray([ [-0.40094747, -0.34836187, -0.29577703, -0.24319299, -0.19060977], [-0.1380274, -0.08544591, -0.03286534, 0.01971428, 0.0722929], [ 0.1248705, 0.17744702, 0.23002243, 0.28259667, 0.33516969], [ 0.38774145, 0.44031188, 0.49288093, 0.54544852, 0.59801459]]) expected_v = np.asarray([ [ 0.69966, 0.68908382, 0.67851319, 0.66794809, 0.65738853,], [ 0.64683452, 0.63628604, 0.6257431, 0.61520571, 0.60467385,], [ 0.59414753, 0.58362676, 0.57311152, 0.56260183, 0.55209767,], [ 0.54159906, 
0.53110598, 0.52061845, 0.51013645, 0.49966, ]]) expected_m = np.asarray([ [ 0.48, 0.49947368, 0.51894737, 0.53842105, 0.55789474], [ 0.57736842, 0.59684211, 0.61631579, 0.63578947, 0.65526316], [ 0.67473684, 0.69421053, 0.71368421, 0.73315789, 0.75263158], [ 0.77210526, 0.79157895, 0.81105263, 0.83052632, 0.85 ]]) print 'next_w error: ', rel_error(expected_next_w, next_w) print 'v error: ', rel_error(expected_v, config['v']) print 'm error: ', rel_error(expected_m, config['m']) """ Explanation: RMSProp and Adam RMSProp [1] and Adam [2] are update rules that set per-parameter learning rates by using a running average of the second moments of gradients. In the file cs231n/optim.py, implement the RMSProp update rule in the rmsprop function and implement the Adam update rule in the adam function, and check your implementations using the tests below. [1] Tijmen Tieleman and Geoffrey Hinton. "Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude." COURSERA: Neural Networks for Machine Learning 4 (2012). [2] Diederik Kingma and Jimmy Ba, "Adam: A Method for Stochastic Optimization", ICLR 2015. 
End of explanation """ learning_rates = {'rmsprop': 1e-4, 'adam': 1e-3} for update_rule in ['adam', 'rmsprop']: print 'running with ', update_rule model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2) solver = Solver(model, small_data, num_epochs=5, batch_size=100, update_rule=update_rule, optim_config={ 'learning_rate': learning_rates[update_rule] }, verbose=True) solvers[update_rule] = solver solver.train() print plt.subplot(3, 1, 1) plt.title('Training loss') plt.xlabel('Iteration') plt.subplot(3, 1, 2) plt.title('Training accuracy') plt.xlabel('Epoch') plt.subplot(3, 1, 3) plt.title('Validation accuracy') plt.xlabel('Epoch') for update_rule, solver in solvers.iteritems(): plt.subplot(3, 1, 1) plt.plot(solver.loss_history, 'o', label=update_rule) plt.subplot(3, 1, 2) plt.plot(solver.train_acc_history, '-o', label=update_rule) plt.subplot(3, 1, 3) plt.plot(solver.val_acc_history, '-o', label=update_rule) for i in [1, 2, 3]: plt.subplot(3, 1, i) plt.legend(loc='upper center', ncol=4) plt.gcf().set_size_inches(15, 15) plt.show() """ Explanation: Once you have debugged your RMSProp and Adam implementations, run the following to train a pair of deep networks using these new update rules: End of explanation """ best_model = None ################################################################################ # TODO: Train the best FullyConnectedNet that you can on CIFAR-10. You might # # batch normalization and dropout useful. Store your best model in the # # best_model variable. 
# ################################################################################ num_train = data['X_train'].shape[0] small_data = { 'X_train': data['X_train'][:num_train], 'y_train': data['y_train'][:num_train], 'X_val': data['X_val'], 'y_val': data['y_val'], } dropout=0.1 model = FullyConnectedNet([100, 100, 100], weight_scale=5e-2, use_batchnorm=True, dropout=dropout) update_rule = 'adam' learning_rate = 1e-3 solver = Solver(model, small_data, num_epochs=5, batch_size=100, update_rule=update_rule, optim_config={ 'learning_rate': learning_rate }, verbose=True) solver.train() best_model = model ################################################################################ # END OF YOUR CODE # ################################################################################ """ Explanation: Train a good model! Train the best fully-connected model that you can on CIFAR-10, storing your best model in the best_model variable. We require you to get at least 50% accuracy on the validation set using a fully-connected net. If you are careful it should be possible to get accuracies above 55%, but we don't require it for this part and won't assign extra credit for doing so. Later in the assignment we will ask you to train the best convolutional network that you can on CIFAR-10, and we would prefer that you spend your effort working on convolutional nets rather than fully-connected nets. You might find it useful to complete the BatchNormalization.ipynb and Dropout.ipynb notebooks before completing this part, since those techniques can help you train powerful models. 
End of explanation
"""
X_test = data['X_test']
y_test = data['y_test']
X_val = data['X_val']
y_val = data['y_val']
y_test_pred = np.argmax(best_model.loss(X_test), axis=1)
y_val_pred = np.argmax(best_model.loss(X_val), axis=1)
print 'Validation set accuracy: ', (y_val_pred == y_val).mean()
print 'Test set accuracy: ', (y_test_pred == y_test).mean()
"""
Explanation: Test your model
Run your best model on the validation and test sets. You should achieve above 50% accuracy on the validation set.
End of explanation
"""
ajgpitch/qutip-notebooks
docs/guide/BasicOperations.ipynb
lgpl-3.0
from qutip import * """ Explanation: Basic Operations on Quantum Objects Contents First Things First The Qobj Class Functions Acting on the Qobj Class <a id='first'></a> First Things First <br> <div class="warn"> **Warning**: Do not run QuTiP from the installation directory. </div> In order to load the QuTiP library we must first call the import statement: End of explanation """ import numpy as np import pylab as plt """ Explanation: In addition, it is often necessary to load addition mathematical functions and plotting features from the NumPy and Matplotlib libraries, respectively: End of explanation """ Qobj() """ Explanation: Here we have imported the Numpy package as "np" so that the functions included in this module can be called using the np.func() syntax. In addition, the plotting functions from Pylab can be called via e.g. plt.plot(). It is important to note that if we choose to import all of the functions directly from the modules using python from numpy import * from pylab import * then the order in which we import QuTiP and these additional libraries is important. In general, if using this import style, you must import QuTiP last: python from numpy import * from pylab import * from qutip import * <a id='qobj'></a> The Quantum Object Class Introduction The key difference between classical and quantum mechanics lies in the use of operators instead of numbers as variables. Moreover, we need to specify state vectors and their properties. Therefore, in computing the dynamics of quantum systems we need a data structure that is capable of encapsulating the properties of a quantum operator and ket/bra vectors. The quantum object class, Qobj, accomplishes this using a sparse matrix representation. To begin, let us create a blank Qobj: End of explanation """ x = np.array([[1, 2, 3, 4, 5]]) Qobj(x) r = np.random.rand(4, 4) Qobj(r) """ Explanation: where we see the blank Qobj object with dimensions, shape, and data. 
Here the data corresponds to a 1x1-dimensional sparse matrix consisting of a single zero entry. <div class="info"> **Hint**: By convention, Class objects in Python such as ``Qobj()`` differ from functions in the use of a beginning capital letter. </div> We can create a Qobj with a user defined data set by passing a list or array of data into the Qobj: End of explanation """ basis(5,3) coherent(5,0.5-0.5j) destroy(4) sigmaz() jmat(5/2.0,'+') """ Explanation: Notice how both the dims and shape change according to the input data. Although dims and shape appear to have the same function, the difference will become quite clear in the section on tensor products and partial traces. <div class="info"> **Hint**: If you are running QuTiP from a python script you must use the `print` function to view the Qobj attributes. </div> States and Operators Manually specifying the data for each quantum object is inefficient. Even more so when most objects correspond to commonly used types such as the ladder operators of a harmonic oscillator, the Pauli spin operators for a two-level system, or state vectors such as Fock states. 
Therefore, QuTiP includes predefined objects for a variety of states: <table> <tr> <th>States</th> <th>Command (# = optional)</th> <th>Inputs</th> </tr> <tr> <td>Fock State</td> <td>`basis(N, #m)` or `fock(N, #m)`</td> <td>N = # of levels in Hilbert space, m = level containing excitation (0 if m not given).</td> </tr> <tr> <td>Fock State Density Matrix</td> <td>`fock_dm(N, #m)`</td> <td>Same as above.</td> </tr> <tr> <td>Coherent State</td> <td>`coherent(N, alpha)`</td> <td>alpha = complex number (eigenvalue) defining coherent state.</td> </tr> <tr> <td>Coherent State Density Matrix</td> <td>`coherent_dm(N, alpha)`</td> <td>Same as above.</td> </tr> <tr> <td>Thermal State Density Matrix</td> <td>`thermal_dm(N, n)`</td> <td>n = particle number expectation value.</td> </tr> </table> and operators: <table> <tr> <th>Operator</th> <th>Command (# = optional)</th> <th>Inputs</th> </tr> <tr> <td>Identity</td> <td>`qeye(N)` or `identity(N)`</td> <td>N = # of levels in Hilbert space.</td> </tr> <tr> <td>Lowering (destruction) Operator</td> <td>`destroy(N)`</td> <td></td> </tr> <tr> <td>Raising (creation) Operator</td> <td>`create(N)`</td> <td></td> </tr> <tr> <td>Number Operator</td> <td>`num(N)`</td> <td></td> </tr> <tr> <td>Single-Mode Displacement Operator</td> <td>`displace(N, alpha)`</td> <td>alpha = Complex displacement amplitude.</td> </tr> <tr> <td>Single-Mode Squeezing Operator</td> <td>`squeeze(N, sp)`</td> <td>sp = Squeezing parameter.</td> </tr> <tr> <td>Pauli Pauli X-Operator (sigma-x)</td> <td>`sigmax()`</td> <td></td> </tr> <tr> <td>Pauli Spin Y-Operator (sigma-y)</td> <td>`sigmay()`</td> <td></td> </tr> <tr> <td>Pauli Spin Z-Operator (sigma-z)</td> <td>`sigmaz()`</td> <td></td> </tr> <tr> <td>Spin Raising Operator (sigma-plus)</td> <td>`sigmap()`</td> <td></td> </tr> <tr> <td>Spin Lowering Operator (sigma-minus)</td> <td>`sigmam()`</td> <td><br></td> </tr> <tr> <td>Higher-Spin Operators</td> <td>`jmat(j, #s)`</td> <td>j = int or half-int representing spin. 
s= 'x', 'y', 'z', '+', or '-'.</td> </tr> </table> As an example, we give the output for a few of these functions: End of explanation """ q = destroy(4) q.dims q.shape """ Explanation: Qobj Attributes We have seen that a quantum object has several internal attributes, such as data, dims, and shape. These can be accessed in the following way: End of explanation """ q.type q.isherm q.data """ Explanation: In general, the attributes (properties) of a Qobj object (or any Python class) can be retrieved using the Q.attribute notation. In addition to the attributes shown with the print function, the Qobj class also has the following: <table> <tr> <th>Property</th> <th>Attribute</th> <th>Description</th> </tr> <tr> <td>Data</td> <td>Q.data</td> <td>Sparse matrix representing quantum state or operator.</td> </tr> <tr> <td>Dimensions</td> <td>Q.dims</td> <td>List keeping track of shapes for individual components of a multipartite system (for tensor products and partial traces).</td> </tr> <tr> <td>Shape</td> <td>Q.shape</td> <td>Dimensions of underlying data matrix.</td> </tr> <tr> <td>is Hermitian?</td> <td>Q.isherm</td> <td>Is operator Hermitian or not?</td> </tr> <tr> <td>Type</td> <td>Q.type</td> <td></td> </tr> </table> <center> <img src='images/BasicOperations/quide-basics-qobj-box.png'> <p>The `Qobj` class viwed as a container for the properties needed to characterize a quantum operator or state vector.</p> </center> For the destruction operator above: End of explanation """ q = destroy(4) x = sigmax() q + 5 x * x q ** 3 x / np.sqrt(2) """ Explanation: The data attribute returns a message stating that the data is a sparse matrix. All Qobj instances store their data as a sparse matrix to save memory. To access the underlying dense matrix one needs to use the Q.full() function as described below. 
Qobj Math The rules for mathematical operations on Qobj instances are similar to standard matrix arithmetic: End of explanation """ q * x """ Explanation: Of course, like matrices, multiplying two objects of incompatible shape throws an error: End of explanation """ basis(5, 3) basis(5, 3).dag() coherent_dm(5, 1) coherent_dm(5, 1).diag() coherent_dm(5, 1).full() coherent_dm(5, 1).norm() coherent_dm(5, 1).sqrtm() coherent_dm(5, 1).tr() (basis(4, 2) + basis(4, 1)).unit() from IPython.core.display import HTML def css_styling(): styles = open("../styles/guide.css", "r").read() return HTML(styles) css_styling() """ Explanation: In addition, the logic operators is equal == and is not equal != are also supported. <a id='functions'></a> Functions Acting on the Qobj Class Like attributes, the quantum object class has defined functions (methods) that operate on Qobj class instances. For a general quantum object Q: <table> <tr> <th>Function</th> <th>Command</th> <th>Description</th> </tr> <tr> <td>Hermicity Check</td> <td>`Q.check_herm()`</td> <td>Check if Qobj is Hermitian.</td> </tr> <tr> <td>Conjugate</td> <td>`Q.conj()`</td> <td>Conjugate of Qobj.</td> </tr> <tr> <td>Dagger (adjoint)</td> <td>`Q.dag()`</td> <td>Adjoint of Qobj.</td> </tr> <tr> <td>Diagonal</td> <td>`Q.diag()`</td> <td>Returns array of diagonal elements.</td> </tr> <tr> <td>Eigenenergies</td> <td>`Q.eigenenergies()`</td> <td>Eigenenergies (values) of a Qobj.</td> </tr> <tr> <td>Eliminate States</td> <td>`Q.eliminate_states(inds)`</td> <td>Qobj with states is list 'inds' removed.</td> </tr> <tr> <td>Exponential</td> <td>`Q.expm()`</td> <td>Matrix exponential of Qobj.</td> </tr> <tr> <td>Extract States</td> <td>`Q.extract_states(inds)`</td> <td>Qobj with only states listed in 'inds'.</td> </tr> <tr> <td>Full</td> <td>`Q.full()`</td> <td>Returns full (dense) array of Qobj data.</td> </tr> <tr> <td>Groundstate</td> <td>`Q.groundstate()`</td> <td>Eigenvalue &amp; vector of Qobj ground state.</td> </tr> <tr> 
<td>Matrix Element</td> <td>`Q.matrix_element(bra,ket)`</td> <td>Matrix element &lt;bra|Q|ket&gt;.</td> </tr> <tr> <td>Norm</td> <td>`Q.norm()`</td> <td>Returns L2-norm for states and trace norm for operators.</td> </tr> <tr> <td>Overlap</td> <td>`Q.overlap(state)`</td> <td>Overlap between Qobj and a given state.</td> </tr> <tr> <td>Partial Trace</td> <td>`Q.ptrace(sel)`</td> <td>Partial trace returning components selected using 'sel'.</td> </tr> <tr> <td>Permute</td> <td>`Q.permute(order)`</td> <td>Permutes tensor structure of Qobj in a given order.</td> </tr> <tr> <td>Sqrt</td> <td>`Q.sqrt()`</td> <td>Matrix sqrt of Qobj,</td> </tr> <tr> <td>Tidyup</td> <td>`Q.tidyup()`</td> <td>Removes small elements from Qobj.</td> </tr> <tr> <td>Trace</td> <td>`Q.trace()`</td> <td>Trace of Qobj.</td> </tr> <tr> <td>Transform</td> <td>`Q.transform(inpt)`</td> <td>Basis transformation defined by matrix or list of kets given by 'inpt'.</td> </tr> <tr> <td>Transpose</td> <td>`Q.transpose()`</td> <td>Transpose of Qobj.</td> </tr> <tr> <td>Unit</td> <td>`Q.unit()`</td> <td>Returns normalized Qobj.</td> </tr> </table> End of explanation """
jArumugam/python-notes
P09Advanced Functions Test.ipynb
mit
def word_lengths(phrase):
    """Return a list with the length of each whitespace-separated word in *phrase*.

    A list comprehension is used (rather than ``map``) so the result is a real
    list on both Python 2 and Python 3 -- the exercise asks for a list of ints,
    but ``map`` returns a lazy iterator on Python 3.
    """
    return [len(word) for word in phrase.split()]

word_lengths('How long are the words in this phrase')
""" Explanation: Advanced Functions Test For this test, you should use the built-in functions to be able to write the requested functions in one line. Problem 1 Use map to create a function which finds the length of each word in the phrase (broken by spaces) and return the values in a list. The function will have an input of a string, and output a list of integers. End of explanation """

# ``reduce`` is a builtin on Python 2 but lives in functools on Python 3;
# importing it from functools works on both versions.
from functools import reduce

def digits_to_num(digits):
    """Fold a sequence of digits into the integer they spell, e.g. [3, 4] -> 34.

    Raises TypeError on an empty sequence (no initial value is supplied),
    matching the original behavior.
    """
    return reduce(lambda acc, digit: 10 * acc + digit, digits)

digits_to_num([3,4,3,2,1])
""" Explanation: Problem 2 Use reduce to take a list of digits and return the number that they correspond to. Do not convert the integers to strings! End of explanation """

def filter_words(word_list, letter):
    """Return the words in *word_list* that start with *letter*.

    ``str.startswith`` is safe for empty strings (the original
    ``word[0] == letter`` raised IndexError on ''), and the list
    comprehension always yields a list, even on Python 3 where ``filter``
    returns a lazy iterator.  ``startswith`` also generalizes to a
    multi-character prefix.
    """
    return [word for word in word_list if word.startswith(letter)]

l = ['hello','are','cat','dog','ham','hi','go','to','heart']
filter_words(l,'h')
""" Explanation: Problem 3 Use filter to return the words from a list of words which start with a target letter. End of explanation """

def concatenate(L1, L2, connector):
    """Pairwise-join L1 and L2 with *connector*: (['A'], ['a'], '-') -> ['A-a'].

    The result is truncated to the shorter of the two input lists, as ``zip``
    does.
    """
    return [x + connector + y for x, y in zip(L1, L2)]

concatenate(['A','B'],['a','b'],'-')
""" Explanation: Problem 4 Use zip and list comprehension to return a list of the same length where each value is the two strings from L1 and L2 concatenated together with connector between them. Look at the example output below: End of explanation """

def d_list(L):
    """Invert *L* into a dict mapping each value to its index.

    Uses the dictionary comprehension the original comments suggested.
    Assumes values of *L* are unique; with duplicates, the later index
    wins (same as the original loop).
    """
    return {value: index for index, value in enumerate(L)}

d_list(['a','b','c'])
""" Explanation: Problem 5 Use enumerate and other skills to return a dictionary which has the values of the list as keys and the index as the value. You may assume that a value will only appear once in the given list. End of explanation """

def count_match_index(L):
    """Count how many items of *L* equal their own index.

    The original ``len(filter(...))`` breaks on Python 3, where ``filter``
    returns an iterator without ``len``; summing a generator works on both
    versions and never materialises an intermediate list.
    """
    return sum(1 for index, value in enumerate(L) if value == index)

count_match_index([0,2,2,1,5,5,6,10])
""" Explanation: Problem 6 Use enumerate and other skills from above to return the count of the number of items in the list whose value equals its index. End of explanation """
texib/deeplearning_homework
tensor-flow-exercises/2_fullyconnected.ipynb
mit
# --- Notebook: tensor-flow-exercises/2_fullyconnected.ipynb (flattened cell dump) ---
# NOTE(review): this is Python 2 code (print statements, cPickle, xrange) using a
# pre-1.0 TensorFlow API (initialize_all_variables, positional arguments to
# softmax_cross_entropy_with_logits); it will not run on Python 3 / modern TF.

# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
import cPickle as pickle
import numpy as np
import tensorflow as tf

""" Explanation: Deep Learning with TensorFlow Credits: Forked from TensorFlow by Google Setup Refer to the setup instructions. Exercise 2 Previously in 1_notmnist.ipynb, we created a pickle with formatted datasets for training, development and testing on the notMNIST dataset. The goal of this exercise is to progressively train deeper and more accurate models using TensorFlow. End of explanation """

# Reload the six arrays serialized by the previous notebook:
# train/valid/test image stacks plus their integer label vectors.
pickle_file = 'notMNIST.pickle'

with open(pickle_file, 'rb') as f:
  save = pickle.load(f)
  train_dataset = save['train_dataset']
  train_labels = save['train_labels']
  valid_dataset = save['valid_dataset']
  valid_labels = save['valid_labels']
  test_dataset = save['test_dataset']
  test_labels = save['test_labels']
  del save  # hint to help gc free up memory
  print 'Training set', train_dataset.shape, train_labels.shape
  print 'Validation set', valid_dataset.shape, valid_labels.shape
  print 'Test set', test_dataset.shape, test_labels.shape

""" Explanation: First reload the data we generated in 1_notmist.ipynb. End of explanation """

image_size = 28   # notMNIST images are 28x28 pixels
num_labels = 10   # ten letter classes

def reformat(dataset, labels):
  # Flatten each 28x28 image into one float32 row of 784 pixels.
  dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
  # Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...] (one-hot encoding
  # via broadcasting the class-index comparison).
  labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
  return dataset, labels

train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print 'Training set', train_dataset.shape, train_labels.shape
print 'Validation set', valid_dataset.shape, valid_labels.shape
print 'Test set', test_dataset.shape, test_labels.shape

""" Explanation: Reformat into a shape that's more adapted to the models we're going to train: - data as a flat matrix, - labels as float 1-hot encodings. End of explanation """

# With gradient descent training, even this much data is prohibitive.
# Subset the training data for faster turnaround.
# NOTE(review): -1 here slices off only the LAST example, i.e. it keeps almost
# the whole training set; the upstream exercise used a value like 10000.
train_subset = -1

graph = tf.Graph()
with graph.as_default():

  # Input data.
  # Load the training, validation and test data into constants that are
  # attached to the graph.
  tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
  tf_train_labels = tf.constant(train_labels[:train_subset])
  tf_valid_dataset = tf.constant(valid_dataset)
  tf_test_dataset = tf.constant(test_dataset)

  # Variables.
  # These are the parameters that we are going to be training. The weight
  # matrix will be initialized using random values following a (truncated)
  # normal distribution. The biases get initialized to zero.
  weights = tf.Variable(
    tf.truncated_normal([image_size * image_size, num_labels]))
  biases = tf.Variable(tf.zeros([num_labels]))

  # Training computation.
  # We multiply the inputs with the weight matrix, and add biases. We compute
  # the softmax and cross-entropy (it's one operation in TensorFlow, because
  # it's very common, and it can be optimized). We take the average of this
  # cross-entropy across all training examples: that's our loss.
  logits = tf.matmul(tf_train_dataset, weights) + biases
  loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))

  # Optimizer.
  # We are going to find the minimum of this loss using gradient descent.
  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

  # Predictions for the training, validation, and test data.
  # These are not part of training, but merely here so that we can report
  # accuracy figures as we train.
  train_prediction = tf.nn.softmax(logits)
  valid_prediction = tf.nn.softmax(
    tf.matmul(tf_valid_dataset, weights) + biases)
  test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)

""" Explanation: We're first going to train a multinomial logistic regression using simple gradient descent. TensorFlow works like this: * First you describe the computation that you want to see performed: what the inputs, the variables, and the operations look like. These get created as nodes over a computation graph. This description is all contained within the block below: with graph.as_default(): ... Then you can run the operations on this graph as many times as you want by calling session.run(), providing it outputs to fetch from the graph that get returned. This runtime operation is all contained in the block below: with tf.Session(graph=graph) as session: ... Let's load all the data into TensorFlow and build the computation graph corresponding to our training: End of explanation """

num_steps = 801

def accuracy(predictions, labels):
  # Percentage of rows whose arg-max prediction matches the one-hot label.
  return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])

with tf.Session(graph=graph) as session:
  # This is a one-time operation which ensures the parameters get initialized as
  # we described in the graph: random weights for the matrix, zeros for the
  # biases.
  tf.initialize_all_variables().run()
  print 'Initialized'
  for step in xrange(num_steps):
    # Run the computations. We tell .run() that we want to run the optimizer,
    # and get the loss value and the training predictions returned as numpy
    # arrays.
    _, l, predictions = session.run([optimizer, loss, train_prediction])
    if (step % 10 == 0):
      print 'Loss at step', step, ':', l
      print 'Training accuracy: %.1f%%' % accuracy(
        predictions, train_labels[:train_subset, :])
      # Calling .eval() on valid_prediction is basically like calling run(), but
      # just to get that one numpy array. Note that it recomputes all its graph
      # dependencies.
      print 'Validation accuracy: %.1f%%' % accuracy(
        valid_prediction.eval(), valid_labels)
  print 'Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels)

""" Explanation: Let's run this computation and iterate: End of explanation """

batch_size = 128

graph = tf.Graph()
with graph.as_default():

  # Input data. For the training data, we use a placeholder that will be fed
  # at run time with a training minibatch.
  tf_train_dataset = tf.placeholder(tf.float32,
                                    shape=(batch_size, image_size * image_size))
  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
  tf_valid_dataset = tf.constant(valid_dataset)
  tf_test_dataset = tf.constant(test_dataset)

  # Variables.
  weights = tf.Variable(
    tf.truncated_normal([image_size * image_size, num_labels]))
  biases = tf.Variable(tf.zeros([num_labels]))

  # Training computation.
  logits = tf.matmul(tf_train_dataset, weights) + biases
  loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))

  # Optimizer.
  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

  # Predictions for the training, validation, and test data.
  train_prediction = tf.nn.softmax(logits)
  valid_prediction = tf.nn.softmax(
    tf.matmul(tf_valid_dataset, weights) + biases)
  test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)

""" Explanation: Let's now switch to stochastic gradient descent training instead, which is much faster. The graph will be similar, except that instead of holding all the training data into a constant node, we create a Placeholder node which will be fed actual data at every call of sesion.run(). End of explanation """

num_steps = 3001

with tf.Session(graph=graph) as session:
  tf.initialize_all_variables().run()
  print "Initialized"
  for step in xrange(num_steps):
    # Pick an offset within the training data, which has been randomized.
    # Note: we could use better randomization across epochs.
    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
    # Generate a minibatch.
    batch_data = train_dataset[offset:(offset + batch_size), :]
    batch_labels = train_labels[offset:(offset + batch_size), :]
    # Prepare a dictionary telling the session where to feed the minibatch.
    # The key of the dictionary is the placeholder node of the graph to be fed,
    # and the value is the numpy array to feed to it.
    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
    _, l, predictions = session.run(
      [optimizer, loss, train_prediction], feed_dict=feed_dict)
    if (step % 500 == 0):
      print "Minibatch loss at step", step, ":", l
      print "Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels)
      print "Validation accuracy: %.1f%%" % accuracy(
        valid_prediction.eval(), valid_labels)
  print "Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels)

""" Explanation: Let's run it: End of explanation """

batch_size = 128

graph = tf.Graph()
with graph.as_default():

  # Input data. For the training data, we use a placeholder that will be fed
  # at run time with a training minibatch.
  # The batch dimension is None so the same placeholder can also be fed the
  # full validation set for evaluation below.
  tf_train_dataset = tf.placeholder(tf.float32,
                                    shape=(None, image_size * image_size))
  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
  tf_valid_dataset = tf.constant(valid_dataset)
  tf_test_dataset = tf.constant(test_dataset)

  # Variables.
  l1_size = 1000  # hidden layer 1 width
  l2_size = 30    # hidden layer 2 width (weights declared but unused below)
  weights_l1 = tf.Variable(
    tf.truncated_normal([image_size * image_size, l1_size]))
  biases_l1 = tf.Variable(tf.zeros([l1_size]))
  weights_l2 = tf.Variable(
    tf.truncated_normal([l1_size, l2_size]))
  biases_l2 = tf.Variable(tf.zeros([l2_size]))
  weights_output = tf.Variable(
    tf.truncated_normal([l1_size ,num_labels]))
  biases_output = tf.Variable(tf.zeros([num_labels]))

  # Training computation.
  # NOTE(review): l2_output is computed but never consumed -- the logits are
  # wired from l1_output, so this is effectively a 1-hidden-layer network.
  l1_output = tf.nn.relu(tf.matmul(tf_train_dataset,weights_l1) + biases_l1)
  l2_output = tf.nn.sigmoid(tf.matmul(l1_output,weights_l2) + biases_l2)
  logits = tf.matmul(l1_output, weights_output) + biases_output
  loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))

  # Optimizer.
  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

  # Predictions for the training, validation, and test data.
  train_prediction = tf.nn.softmax(logits)
  # valid_prediction = tf.nn.softmax(
  #    tf.matmul(tf_valid_dataset, weights) + biases)
  # test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)

num_steps = 3001

with tf.Session(graph=graph) as session:
  tf.initialize_all_variables().run()
  print "Initialized"
  for step in xrange(num_steps):
    # Pick an offset within the training data, which has been randomized.
    # Note: we could use better randomization across epochs.
    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
    # Generate a minibatch.
    batch_data = train_dataset[offset:(offset + batch_size), :]
    batch_labels = train_labels[offset:(offset + batch_size), :]
    # Prepare a dictionary telling the session where to feed the minibatch.
    # The key of the dictionary is the placeholder node of the graph to be fed,
    # and the value is the numpy array to feed to it.
    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
    _, l, predictions = session.run(
      [optimizer, loss, train_prediction], feed_dict=feed_dict)
    if (step % 500 == 0):
      print "Minibatch loss at step", step, ":", l
      # Validation accuracy: the whole validation set goes through the
      # (None, ...) training placeholder defined above.
      print "V acc",accuracy(session.run(train_prediction,feed_dict={tf_train_dataset:valid_dataset,}),valid_labels)
      # print "Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels)
      # print "Validation accuracy: %.1f%%" % accuracy(
      #    valid_prediction.eval(), valid_labels)
      # print "Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels)

""" Explanation: Problem Turn the logistic regression example with SGD into a 1-hidden layer neural network with rectified linear units (nn.relu()) and 1024 hidden nodes. This model should improve your validation / test accuracy. End of explanation """
carltoews/tennis
results/.ipynb_checkpoints/DI_plot1-checkpoint.ipynb
gpl-3.0
# --- Notebook: results/DI_plot1-checkpoint.ipynb (flattened cell dump) ---
# NOTE(review): the img tag below is malformed (stray comma, unbalanced quote)
# in the original -- left as-is.
from IPython.display import display, HTML
display(HTML('''<img src="image1.png",width=800,height=500">'''))

""" Explanation: Plot 1: The predictive potential of rank difference End of explanation """

import numpy as np               # numerical libraries
import pandas as pd              # for data analysis
import matplotlib as mpl         # a big library with plotting functionality
import matplotlib.pyplot as plt  # a subset of matplotlib with most of the useful tools
import IPython as IP
%matplotlib inline
import pdb
from sklearn import linear_model as lm

""" Explanation: Description: The most obvious feature by which to predict the outcome of a match is to use the difference in player ranking. To assign these rankings, the Association of Tennis Professionals (ATP) assign "ranking points" to players each time they win a match, with extra points awarded for upsets and important victories. Players are then ranked according to who has the most ranking points. It is not apriori obvious, however, to what extent rank differences function as a predictive tool, nor how such derived prediction might compare to those implied by open betting markets. The following figure provides three different ways to think about these questions. The left hand figure shows a probability curve that emerges from a logistic regression on rank point differences. The $x-$axis represents the absolute value of the difference in rank points, scaled by its standard deviation. The logistic curve is superimposed over a histogram of empircal probabalities, achieved by binning match outcomes by some discrete subsample of scaled rank-point differences. The plot highlights where and to what extent the logistic curve succeeds in matching empirical probabilities. The center plot shows the same curve superimposed over a scatterplot of the probabilities implied by the odds markets. The scatterplot is achieved by taking the latest published odds for each winner, transforming them to probabilities, and plotting against the scaled difference in rank points. This plot illustrates the extent to which the single, rank-based logistic probability needs to serve as proxy for the host of probabilities emerging from the odds market. The right hand plot synthesizes the information in the center plot by showing the standard deviation of the implied probabilities, grouped along discrete samples of scaled rank point differences. The strong negative slope of the regression line gives a rough idea of the rate at which these deviations decrease as a function of normalized rank difference. Since our objective is to beat the odds market, the regime of maximal interest is the one where the odds market is most uncertain. Code to produce the plot: End of explanation """

# Merge match records with the odds columns (PSW/PSL = winner/loser odds) and
# drop rows missing either player's rank points.
odds= pd.read_pickle('../data/pickle_files/odds.pkl')
matches= pd.read_pickle('../data/pickle_files/matches.pkl')
data = pd.merge(matches,odds[['PSW','PSL','key_o']].dropna(axis=0,subset=["PSW"]),how='inner',on='key_o')
data = data[~data.winner_rank_points.isnull() & ~data.loser_rank_points.isnull()]
IP.display.display(data[0:3])

""" Explanation: Load data and take a peak at it. End of explanation """

# Split by tournament year into train / validation / test partitions.
data['year'] = data['tourney_date'].map(lambda x: x.year)
training = data[data.year.isin([2010,2011,2012])]
validation = data[data.year.isin([2013,2014])]
test = data[data.year.isin([2015,2016])]

""" Explanation: Separate data into training, validation, and test sets. (This division is not used for the plot above, but will be critical in assessing the performance of our learning algorithms.) End of explanation """

# consider rank difference to be positive if winner higher ranked, otherwise negative
rank_diff = (training['winner_rank_points'] - training['loser_rank_points']).values
# if higher ranked player won, raw rank was a successful predictor
y = (rank_diff > 0)*1
# predictions done *before* the match, so algorithm operates on absolute value of rank difference
X = np.abs(rank_diff)
# for numerical well-behavedness, we need to scale and center the data
X=(X/np.std(X,axis=0))

""" Explanation: Define each match as a 1 or a 0, depending on whether the higher ranked player won. End of explanation """

lr = lm.LogisticRegression(C=1., solver='lbfgs')
lr.fit(X.reshape(len(X),-1),y*1)
cofs = lr.coef_[0]  # fitted slope used to draw the logistic curve below

""" Explanation: Perform 1-D logistic regression on training data. End of explanation """

# define figure and axes
fig = plt.figure(figsize=(15,5))
ax0 = fig.add_subplot(131)
ax1 = fig.add_subplot(132)
ax2 = fig.add_subplot(133)

# figure A: predicted probabilities vs. empirical probs
hist, bin_edges = np.histogram(X,bins=100)
# empirical win probability per bin; np.max([hist[i],1]) guards against
# dividing by zero for empty bins
p = [np.sum(y[np.where((X>=bin_edges[i]) & (X<bin_edges[i+1]))[0]])/np.max([hist[i],1]) for i in np.arange(len(bin_edges)-1)]
bar_pos = np.arange(len(p))
bar_width = np.diff(bin_edges)
ax0.bar(bin_edges[0:-1], p, width=bar_width, align='edge', alpha=0.5)
r = np.arange(X.min(),X.max(),.1)
s = 1/(1+np.exp(-cofs[0]*r))  # logistic curve evaluated on a grid
ax0.plot(r,s,'r')
ax0.set_xlabel('Scaled rank difference',fontsize=12)
ax0.set_ylabel('Probability that higher ranked wins',fontsize=12)
ax0.set_title('Logistic fit to empirical probabilities',fontsize=12)
ax0.legend(['Logistic probability curve','Empirical probability hist.'])

# figure B: probabilities predicted by odds market
ProbW = 1/training.PSW  # implied probability from the winner's odds
ProbL = 1/training.PSL  # implied probability from the loser's odds
idx = (training.winner_rank_points>training.loser_rank_points)
odds_prob=np.where(idx,ProbW,ProbL)  # implied probability of the higher-ranked player
t = pd.DataFrame({'X':X,'odds_prob':odds_prob})
ts = t.sort_values('X')
ax1.plot(ts['X'],ts['odds_prob'],'.b')
ax1.plot(r,s,'r')
ax1.set_xlabel('Scaled rank difference',fontsize=12)
ax1.set_ylabel('Probability higher ranked wins',fontsize=12)
ax1.set_title('Probabilities implied by odds market.',fontsize=12)
ax1.legend(['Odds market probabilities','Logistic probability curve'])

# Fig C: variance in odds probabilities as a function of rank difference
x_odds = ts['X'].values.reshape(len(ts),-1)
y_odds = ts['odds_prob'].values
hist, bin_edges = np.histogram(x_odds,bins=10)
# std-dev of the market probabilities within each rank-difference bin
stds = [np.std(y_odds[np.where((X>=bin_edges[i]) & (X<bin_edges[i+1]))]) for i in np.arange(len(bin_edges)-1)]
reg = lm.LinearRegression()
reg.fit(bin_edges[0:-1].reshape(10,1),stds)
yv=reg.predict(bin_edges[0:-1].reshape(10,1))
ax2.plot(bin_edges[0:-1],stds,'*b')
ax2.plot(bin_edges[0:-1],yv,'r')
ax2.set_xlabel('Scaled rank difference',fontsize=12)
ax2.set_ylabel('Variance of market prob.',fontsize=12)
ax2.set_title('Trends in stdev of implied probabilities',fontsize=12)
ax2.legend(['Stdev of binned market-probs.','Regression line'])

""" Explanation: Produce the plots: End of explanation """
n-witt/MachineLearningWithText_SS2017
tutorials/7 Principal Component Analysis.ipynb
gpl-3.0
# --- Notebook: tutorials/7 Principal Component Analysis.ipynb (flattened cell dump) ---
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()

""" Explanation: Principal Component Analysis (PCA) Up until now we've only talked about supervised methods. What were these again? Now we want to discuss unsupervised methods that highlight aspects of data without known labels. Fundamentally PCA is a dimensionality reduction method. As such it may be used for example for feature extraction, visualization and noise filtering. End of explanation """

# Draw 200 correlated 2-D points: a random 2x2 linear map applied to
# standard-normal samples, transposed to shape (200, 2).
rng = np.random.RandomState(1)
X = np.dot(rng.rand(2, 2), rng.randn(2, 200)).T
plt.scatter(X[:, 0], X[:, 1])
plt.axis('equal');

""" Explanation: Intuition of PCA They way PCA works is easiest explained by visualizing it's behaviour. So let's print a two dimensional dataset: End of explanation """

from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(X)

""" Explanation: By eye, what can we say about this dataset? Is there a relationship with between x and y? and if so, what type/kind of relationship is it? We had similar datset in the Introducing Scikit-Learn. What did we do then? PCA tries to create a list of the principal axes in the data. Let's employ Scikit-Learn do that for us: End of explanation """

print(pca.components_)          # unit vectors of the principal axes
print(pca.explained_variance_)  # variance of the data along each axis

""" Explanation: PCA learns what the components are an how variance is explained by them. End of explanation """

def draw_vector(v0, v1, ax=None):
    # Draw an arrow from point v0 to point v1 on the given (or current) axes.
    ax = ax or plt.gca()
    arrowprops=dict(arrowstyle='->', linewidth=2, shrinkA=0, shrinkB=0)
    ax.annotate('', v1, v0, arrowprops=arrowprops)

# plot data
plt.scatter(X[:, 0], X[:, 1], alpha=0.2)
# one arrow per principal component, rooted at the mean and scaled by
# 3 * sqrt(explained variance), i.e. three standard deviations
for length, vector in zip(pca.explained_variance_, pca.components_):
    v = vector * 3 * np.sqrt(length)
    draw_vector(pca.mean_, pca.mean_ + v)
plt.axis('equal');

""" Explanation: What could that possibly mean? Any ideas? When it's plotted it becomes clearer End of explanation """

# Same visualization on an isotropic (uncorrelated) point cloud for contrast.
X = rng.randn(250, 2)
plt.scatter(X[:, 0], X[:, 1])

# fit estimator
pca = PCA(n_components=2)
pca.fit(X)

# plot data
plt.scatter(X[:, 0], X[:, 1], alpha=1)
for length, vector in zip(pca.explained_variance_, pca.components_):
    v = vector * 3 * np.sqrt(length)
    draw_vector(pca.mean_, pca.mean_ + v)

""" Explanation: What are you seeing? There are three important aspects: The direction, the origin, and the length of the vectors. Each vector is a principal component. The length indicates how "important" this component is. Mathmatically: It's the variance of the data projected onto that principal axes. The direction indicates the position of the principal component. The origin is the mean of the data in any dimension. What do the principal components of the following dataset look like? End of explanation """

# Project the correlated dataset onto its single strongest component, then map
# it back into 2-D to visualize exactly what one-component PCA keeps.
rng = np.random.RandomState(1)
X = np.dot(rng.rand(2, 2), rng.randn(2, 200)).T
pca = PCA(n_components=1)
pca.fit(X)
X_pca = pca.transform(X)
print("original shape: ", X.shape)
print("transformed shape:", X_pca.shape)

X_new = pca.inverse_transform(X_pca)
plt.scatter(X[:, 0], X[:, 1], alpha=0.5)
plt.scatter(X_new[:, 0], X_new[:, 1], alpha=0.5)
plt.axis('equal');

""" Explanation: Dimensionality reduction So, how can we use that in order to reduce the dimensionality of our dataset? Note: We want to cancel out dimensions in a way that the distance between datapoints is preserved as good as possible. Let's have a look: End of explanation """

import seaborn as sns
iris = sns.load_dataset('iris')
X_iris = iris.drop('species', axis=1)
y_iris = iris['species']
iris.head()

from sklearn.decomposition import PCA
model = PCA(n_components=2)
model.fit(X_iris)
X_2D = model.transform(X_iris)

# Color points by species -- note PCA itself never saw the labels.
colormap = y_iris.copy()
colormap[colormap == 'setosa'] = 'b'
colormap[colormap == 'virginica'] = 'r'
colormap[colormap == 'versicolor'] = 'g'
plt.scatter(X_2D[:, 0], X_2D[:, 1], c=colormap)
plt.xlabel('PCA1')
plt.xlabel('PCA2')  # NOTE(review): presumably meant plt.ylabel('PCA2')

""" Explanation: What do we see from this plot? In the two-dimensional representation, the species are fairly well separated. Remember, the PCA algorithm had no knowledge of the species labels! Classification will probably be effective on the dataset. PCA as Noise Filtering: Digits dataset PCA can be used to filter noise The idea is this: any components with variance much larger than the effect of the noise should be relatively unaffected by the noise. If you reconstruct the data using just the components explaing the most variance, you should be preferentially keeping the signal and throwing out the noise. Let's see how this looks with the digits data. First we will plot several of the input noise-free data: End of explanation """

from sklearn.datasets import load_digits
digits = load_digits()

def plot_digits(data):
    # Show the first 40 rows of *data* as 8x8 grayscale digit images in a
    # 4x10 grid with ticks removed.
    fig, axes = plt.subplots(4, 10, figsize=(10, 4),
                             subplot_kw={'xticks':[], 'yticks':[]},
                             gridspec_kw=dict(hspace=0.1, wspace=0.1))
    for i, ax in enumerate(axes.flat):
        ax.imshow(data[i].reshape(8, 8),
                  cmap='binary', interpolation='nearest',
                  clim=(0, 16))
plot_digits(digits.data)

""" Explanation: Now, let's add some noise: End of explanation """

# Gaussian noise with standard deviation 4 around each pixel value.
np.random.seed(42)
noisy = np.random.normal(digits.data, 4)
plot_digits(noisy)

""" Explanation: Let's train a PCA on the noisy data, requesting that the projection preserve 50% of the variance: End of explanation """

# PCA(0.50): keep as many components as needed to explain 50% of the variance,
# then reconstruct the images from those components only.
pca = PCA(0.50).fit(noisy)
pca.n_components_

components = pca.transform(noisy)
filtered = pca.inverse_transform(components)
plot_digits(filtered)
aaossa/Dear-Notebooks
Web scraping/Lessons learned in web scraping.ipynb
gpl-3.0
# --- Notebook: Web scraping/Lessons learned in web scraping.ipynb (flattened cell dump) ---
# NOTE(review): requests is a third-party dependency, and these cells perform
# live network I/O against www.google.com.
import requests

""" Explanation: Lessons learned in web scraping End of explanation """

# A HEAD request transfers only the response headers, so it is cheaper than a
# full GET for checking that a URL is serviceable; both responses expose the
# Content-Length header read here.
req = requests.head('http://www.google.com')
print(req.headers['Content-Length'])

req = requests.get('http://www.google.com')
print(req.headers['Content-Length'])

""" Explanation: Making requests Lesson 1 - Use the head! A head request is a lighter and faster way of checking if a url is serviceable, a given file exists, etc.. LastModified and ContentLength are useful. What is a HTTTP HEAD request good for? Some Uses End of explanation """
fluxcapacitor/source.ml
jupyterhub.ml/notebooks/train_deploy/zz_under_construction/zz_old/talks/DataWeekends/SparkMLDeployment/DataWeekends-Mar182017-SparkMLDeployment.ipynb
apache-2.0
# You may need to Reconnect (more than Restart) the Kernel to pick up changes to these sett import os master = '--master spark://spark-master-2-1-0:7077' conf = '--conf spark.cores.max=1 --conf spark.executor.memory=512m' packages = '--packages com.amazonaws:aws-java-sdk:1.7.4,org.apache.hadoop:hadoop-aws:2.7.1' jars = '--jars /root/lib/jpmml-sparkml-package-1.0-SNAPSHOT.jar' py_files = '--py-files /root/lib/jpmml.py' os.environ['PYSPARK_SUBMIT_ARGS'] = master \ + ' ' + conf \ + ' ' + packages \ + ' ' + jars \ + ' ' + py_files \ + ' ' + 'pyspark-shell' print(os.environ['PYSPARK_SUBMIT_ARGS']) """ Explanation: Who Am I? Chris Fregly Research Scientist, Founder @ PipelineIO Video Series Author "High Performance Tensorflow in Production" @ OReilly (Coming Soon) Founder @ Advanced Spark and Tensorflow Meetup Github Repo DockerHub Repo Slideshare YouTube Who Was I? Software Engineer @ Netflix, Databricks, IBM Spark Tech Center Types of Model Deployments KeyValue ie. Recommendations In-memory: Redis, Memcache On-disk: Cassandra, RocksDB First-class Servable in Tensorflow Serving PMML It's Useful and Well-Supported Apple, Cisco, Airbnb, HomeAway, etc Please Don't Re-build It - Reduce Your Technical Debt! Native Code Generation (CPU and GPU) Hand-coded (Python + Pickling) Generate Java Code from PMML? Tensorflow Models freeze_graph.py: Combine Tensorflow Graph (Static) with Trained Weights (Checkpoints) into Single Deployable Model Demos!! 
End of explanation """ from pyspark.ml.linalg import Vectors from pyspark.ml.feature import VectorAssembler, StandardScaler from pyspark.ml.feature import OneHotEncoder, StringIndexer from pyspark.ml import Pipeline, PipelineModel from pyspark.ml.regression import LinearRegression from pyspark.sql import SparkSession spark = SparkSession.builder.getOrCreate() """ Explanation: Deploy Spark ML Models End of explanation """ df = spark.read.format("csv") \ .option("inferSchema", "true").option("header", "true") \ .load("s3a://datapalooza/airbnb/airbnb.csv.bz2") df.registerTempTable("df") print(df.head()) print(df.count()) """ Explanation: Step 0: Load Libraries and Data End of explanation """ df_filtered = df.filter("price >= 50 AND price <= 750 AND bathrooms > 0.0 AND bedrooms is not null") df_filtered.registerTempTable("df_filtered") df_final = spark.sql(""" select id, city, case when state in('NY', 'CA', 'London', 'Berlin', 'TX' ,'IL', 'OR', 'DC', 'WA') then state else 'Other' end as state, space, cast(price as double) as price, cast(bathrooms as double) as bathrooms, cast(bedrooms as double) as bedrooms, room_type, host_is_super_host, cancellation_policy, cast(case when security_deposit is null then 0.0 else security_deposit end as double) as security_deposit, price_per_bedroom, cast(case when number_of_reviews is null then 0.0 else number_of_reviews end as double) as number_of_reviews, cast(case when extra_people is null then 0.0 else extra_people end as double) as extra_people, instant_bookable, cast(case when cleaning_fee is null then 0.0 else cleaning_fee end as double) as cleaning_fee, cast(case when review_scores_rating is null then 80.0 else review_scores_rating end as double) as review_scores_rating, cast(case when square_feet is not null and square_feet > 100 then square_feet when (square_feet is null or square_feet <=100) and (bedrooms is null or bedrooms = 0) then 350.0 else 380 * bedrooms end as double) as square_feet from df_filtered """).persist() 
df_final.registerTempTable("df_final") df_final.select("square_feet", "price", "bedrooms", "bathrooms", "cleaning_fee").describe().show() print(df_final.count()) print(df_final.schema) # Most popular cities spark.sql(""" select state, count(*) as ct, avg(price) as avg_price, max(price) as max_price from df_final group by state order by count(*) desc """).show() # Most expensive popular cities spark.sql(""" select city, count(*) as ct, avg(price) as avg_price, max(price) as max_price from df_final group by city order by avg(price) desc """).filter("ct > 25").show() """ Explanation: Step 1: Clean, Filter, and Summarize the Data End of explanation """ continuous_features = ["bathrooms", \ "bedrooms", \ "security_deposit", \ "cleaning_fee", \ "extra_people", \ "number_of_reviews", \ "square_feet", \ "review_scores_rating"] categorical_features = ["room_type", \ "host_is_super_host", \ "cancellation_policy", \ "instant_bookable", \ "state"] """ Explanation: Step 2: Define Continous and Categorical Features End of explanation """ [training_dataset, validation_dataset] = df_final.randomSplit([0.8, 0.2]) """ Explanation: Step 3: Split Data into Training and Validation End of explanation """ continuous_feature_assembler = VectorAssembler(inputCols=continuous_features, outputCol="unscaled_continuous_features") continuous_feature_scaler = StandardScaler(inputCol="unscaled_continuous_features", outputCol="scaled_continuous_features", \ withStd=True, withMean=False) """ Explanation: Step 4: Continous Feature Pipeline End of explanation """ categorical_feature_indexers = [StringIndexer(inputCol=x, \ outputCol="{}_index".format(x)) \ for x in categorical_features] categorical_feature_one_hot_encoders = [OneHotEncoder(inputCol=x.getOutputCol(), \ outputCol="oh_encoder_{}".format(x.getOutputCol() )) \ for x in categorical_feature_indexers] """ Explanation: Step 5: Categorical Feature Pipeline End of explanation """ feature_cols_lr = [x.getOutputCol() \ for x in 
categorical_feature_one_hot_encoders] feature_cols_lr.append("scaled_continuous_features") feature_assembler_lr = VectorAssembler(inputCols=feature_cols_lr, \ outputCol="features_lr") """ Explanation: Step 6: Assemble our Features and Feature Pipeline End of explanation """ linear_regression = LinearRegression(featuresCol="features_lr", \ labelCol="price", \ predictionCol="price_prediction", \ maxIter=10, \ regParam=0.3, \ elasticNetParam=0.8) estimators_lr = \ [continuous_feature_assembler, continuous_feature_scaler] \ + categorical_feature_indexers + categorical_feature_one_hot_encoders \ + [feature_assembler_lr] + [linear_regression] pipeline = Pipeline(stages=estimators_lr) pipeline_model = pipeline.fit(training_dataset) print(pipeline_model) """ Explanation: Step 7: Train a Linear Regression Model End of explanation """ from jpmml import toPMMLBytes model_bytes = toPMMLBytes(spark, training_dataset, pipeline_model) print(model_bytes.decode("utf-8")) """ Explanation: Step 8: Serialize PipelineModel End of explanation """ import urllib.request namespace = 'default' model_name = 'airbnb' version = '1' update_url = 'http://prediction-pmml-aws.demo.pipeline.io/update-pmml-model/%s/%s/%s' % (namespace, model_name, version) update_headers = {} update_headers['Content-type'] = 'application/xml' req = urllib.request.Request(update_url, \ headers=update_headers, \ data=model_bytes) resp = urllib.request.urlopen(req) print(resp.status) # Should return Http Status 200 """ Explanation: Step 9: Push Model to Live, Running Spark ML Model Server (Mutable) End of explanation """ import urllib.parse import json namespace = 'default' model_name = 'airbnb' version = '1' evaluate_url = 'http://prediction-pmml-aws.demo.pipeline.io/evaluate-pmml-model/%s/%s/%s' % (namespace, model_name, version) evaluate_headers = {} evaluate_headers['Content-type'] = 'application/json' input_params = '{"bathrooms":5.0, \ "bedrooms":4.0, \ "security_deposit":175.00, \ "cleaning_fee":25.0, \ 
"extra_people":1.0, \ "number_of_reviews": 2.0, \ "square_feet": 250.0, \ "review_scores_rating": 2.0, \ "room_type": "Entire home/apt", \ "host_is_super_host": "0.0", \ "cancellation_policy": "flexible", \ "instant_bookable": "1.0", \ "state": "CA"}' encoded_input_params = input_params.encode('utf-8') req = urllib.request.Request(evaluate_url, \ headers=evaluate_headers, \ data=encoded_input_params) resp = urllib.request.urlopen(req) print(resp.read()) """ Explanation: Step 10: Evalute Model End of explanation """ from urllib import request sourceBytes = ' \n\ private String str; \n\ \n\ public void initialize(Map<String, Object> args) { \n\ } \n\ \n\ public Object predict(Map<String, Object> inputs) { \n\ String id = (String)inputs.get("id"); \n\ \n\ return id.equals("21619"); \n\ } \n\ '.encode('utf-8') """ Explanation: Bonus Demos! Deploy Java-based Model Create Java-based Model End of explanation """ from urllib import request namespace = 'default' model_name = 'java_equals' version = '1' update_url = 'http://prediction-java-aws.demo.pipeline.io/update-java/%s/%s/%s' % (namespace, model_name, version) update_headers = {} update_headers['Content-type'] = 'text/plain' req = request.Request("%s" % update_url, headers=update_headers, data=sourceBytes) resp = request.urlopen(req) generated_code = resp.read() print(generated_code.decode('utf-8')) """ Explanation: Deploy Java-based Model End of explanation """ from urllib import request namespace = 'default' model_name = 'java_equals' version = '1' evaluate_url = 'http://prediction-java-aws.demo.pipeline.io/evaluate-java/%s/%s/%s' % (namespace, model_name, version) evaluate_headers = {} evaluate_headers['Content-type'] = 'application/json' input_params = '{"id":"21618"}' encoded_input_params = input_params.encode('utf-8') req = request.Request(evaluate_url, headers=evaluate_headers, data=encoded_input_params) resp = request.urlopen(req) print(resp.read()) # Should return false from urllib import request namespace = 
'default' model_name = 'java_equals' version = '1' evaluate_url = 'http://prediction-java-aws.demo.pipeline.io/evaluate-java/%s/%s/%s' % (namespace, model_name, version) evaluate_headers = {} evaluate_headers['Content-type'] = 'application/json' input_params = '{"id":"21619"}' encoded_input_params = input_params.encode('utf-8') req = request.Request(evaluate_url, headers=evaluate_headers, data=encoded_input_params) resp = request.urlopen(req) print(resp.read()) # Should return true """ Explanation: Evaluate Java-based Model End of explanation """ !pip install sklearn_pandas !pip install git+https://github.com/jpmml/sklearn2pmml.git """ Explanation: Deploy Scikit-Learn Model End of explanation """ import pandas as pd import numpy as np import urllib.request import urllib.parse import json from sklearn.datasets import load_diabetes,load_iris from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error as mse, r2_score from sklearn2pmml import PMMLPipeline from sklearn.tree import DecisionTreeClassifier from sklearn2pmml import sklearn2pmml iris = load_iris() iris_df = pd.DataFrame(iris.data,columns=iris.feature_names) iris_df['Species'] = iris.target iris_pipeline = PMMLPipeline([ ("classifier", DecisionTreeClassifier()) ]) iris_pipeline.fit(iris_df[iris_df.columns.difference(["Species"])], iris_df["Species"]) """ Explanation: Create Scikit-Learn Model End of explanation """ sklearn2pmml(iris_pipeline, "DecisionTreeIris.pmml", with_repr = True) model_bytes = bytearray(open('DecisionTreeIris.pmml', 'rb').read()) """ Explanation: Serialize Scikit-Learn Model End of explanation """ import urllib.request import urllib.parse namespace = 'default' model_name = 'iris' version = '1' update_url = 'http://prediction-pmml-aws.demo.pipeline.io/update-pmml-model/%s/%s/%s' % (namespace, model_name, version) update_headers = {} update_headers["Content-type"] = "application/xml" req = urllib.request.Request(update_url, headers=update_headers, 
data=model_bytes) resp = urllib.request.urlopen(req) print(resp.status) namespace = 'default' model_name = 'iris' version = '1' evaluate_url = 'http://prediction-pmml-aws.demo.pipeline.io/evaluate-pmml-model/%s/%s/%s' % (namespace, model_name, version) evaluate_headers = {} evaluate_headers['Content-type'] = 'application/json' input_params = iris_df.ix[0,:-1].to_json() encoded_input_params = input_params.encode('utf-8') req = urllib.request.Request(evaluate_url, headers=evaluate_headers, data=encoded_input_params) resp = urllib.request.urlopen(req) print(resp.read()) """ Explanation: Deploy Scikit-Learn Model End of explanation """ from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) from IPython.display import clear_output, Image, display, HTML html = '<iframe width=1200px height=500px src="http://hystrix.demo.pipeline.io/hystrix-dashboard/monitor/monitor.html?streams=%5B%7B%22name%22%3A%22Predictions%20-%20AWS%22%2C%22stream%22%3A%22http%3A%2F%2Fturbine-aws.demo.pipeline.io%2Fturbine.stream%22%2C%22auth%22%3A%22%22%2C%22delay%22%3A%22%22%7D%2C%7B%22name%22%3A%22Predictions%20-%20GCP%22%2C%22stream%22%3A%22http%3A%2F%2Fturbine-gcp.demo.pipeline.io%2Fturbine.stream%22%2C%22auth%22%3A%22%22%2C%22delay%22%3A%22%22%7D%5D">' display(HTML(html)) """ Explanation: Monitoring Your Models Netflix Microservices Dashboard (Hystrix) End of explanation """ from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) from IPython.display import clear_output, Image, display, HTML html = '<iframe width=1200px height=500px src="http://grafana.demo.pipeline.io">' display(HTML(html)) """ Explanation: Grafana + Prometheus Dashboard End of explanation """ # Spark ML - Airbnb !kubectl create --context=awsdemo -f /root/pipeline/loadtest.ml/loadtest-aws-airbnb-rc.yaml # Codegen - Java - Simple !kubectl create --context=awsdemo -f 
/root/pipeline/loadtest.ml/loadtest-aws-equals-rc.yaml # Tensorflow AI - Tensorflow Serving - Simple !kubectl create --context=awsdemo -f /root/pipeline/loadtest.ml/loadtest-aws-minimal-rc.yaml """ Explanation: Load-Test Your Model Servers Run JMeter Tests from Local Laptop (Limited by Laptop Performance) Run Headless JMeter Tests from Training Clusters in Cloud End of explanation """ !kubectl delete --context=awsdemo rc loadtest-aws-airbnb !kubectl delete --context=awsdemo rc loadtest-aws-equals !kubectl delete --context=awsdemo rc loadtest-aws-minimal """ Explanation: End Load Tests End of explanation """ !kubectl rolling-update prediction-tensorflow --context=awsdemo --image-pull-policy=Always --image=fluxcapacitor/prediction-tensorflow """ Explanation: Rolling Deploy End of explanation """
tensorflow/workshops
extras/tensorflow_lattice/04_lattice_basics.ipynb
apache-2.0
!pip install tensorflow_lattice import tensorflow as tf import tensorflow_lattice as tfl from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter import numpy as np """ Explanation: Basics of lattice models In this notebook, we'll explain a lattice model, an interpolated lookup table. In addition, we'll show how monotonicity and smooth regularizers can change the model. First we need to import libraries we're going to use. End of explanation """ # Hypercube (multilinear) interpolation in a 2 x 2 lattice. # params[0] == lookup value at (0, 0) # params[1] == lookup value at (0, 1) # params[2] == lookup value at (1, 0) # params[3] == lookup value at (1, 1) def twod(x1, x2, params): y = ((1 - x1) * (1 - x2) * params[0] + (1 - x1) * x2 * params[1] + x1 * (1 - x2) * params[2] + x1 * x2 * params[3]) return y # This function will generate 3d plot for lattice function values. # params uniquely characterizes the lattice lookup values. def lattice_surface(params): print('Lattice params:') print(params) %matplotlib inline fig = plt.figure() ax = fig.gca(projection='3d') # Make data. n = 50 xv, yv = np.meshgrid(np.linspace(0.0, 1.0, num=n), np.linspace(0.0, 1.0, num=n)) zv = np.zeros([n, n]) for k1 in range(n): for k2 in range(n): zv[k1, k2] = twod(xv[k1, k2], yv[k1, k2], params) # Plot the surface. surf = ax.plot_surface(xv, yv, zv, cmap=cm.coolwarm) # Customize the z axis. ax.set_zlim(0.0, 1.0) ax.zaxis.set_major_locator(LinearLocator(10)) ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f')) # Add a color bar which maps values to colors. fig.colorbar(surf, shrink=0.5, aspect=5,) """ Explanation: Lattice model visualization Now, let us define helper functions for visualizing the surface of 2d lattice. End of explanation """ # This will plot the surface plot. lattice_surface([0.0, 1.0, 1.0, 0.0]) """ Explanation: Let's draw a surface of 2d lattice model. 
This model represents an "XOR" function. End of explanation """ # Reset the graph. tf.reset_default_graph() # Prepare the dataset. x_data = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] y_data = [[0.0], [1.0], [1.0], [0.0]] # Define placeholders. x = tf.placeholder(dtype=tf.float32, shape=(None, 2)) y_ = tf.placeholder(dtype=tf.float32, shape=(None, 1)) # 2 x 2 lattice with 1 output. # lattice_param is [output_dim, 4] tensor. lattice_sizes = [2, 2] (y, lattice_param, _, _) = tfl.lattice_layer( x, lattice_sizes=[2, 2], output_dim=1) # Sqaured loss loss = tf.reduce_mean(tf.square(y - y_)) # Minimize! train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss) sess = tf.Session() sess.run(tf.global_variables_initializer()) # Iterate 100 times for _ in range(100): sess.run(train_op, feed_dict={x: x_data, y_: y_data}) # Fetching trained lattice parameter. lattice_param_val = sess.run(lattice_param) # Draw the surface! lattice_surface(lattice_param_val[0]) """ Explanation: Train XOR function We'll provide a synthetic data that represents the "XOR" function, that is f(0, 0) = 0 f(0, 1) = 1 f(1, 0) = 1 f(1, 1) = 0 and check whether a lattice can learn this function. End of explanation """ tf.reset_default_graph() x_data = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] y_data = [[0.0], [1.0], [1.0], [0.0]] x = tf.placeholder(dtype=tf.float32, shape=(None, 2)) y_ = tf.placeholder(dtype=tf.float32, shape=(None, 1)) # 2 x 2 lattice with 1 output. # lattice_param is [output_dim, 4] tensor. lattice_sizes = [2, 2] (y, lattice_param, projection_op, _) = tfl.lattice_layer( x, lattice_sizes=[2, 2], output_dim=1, is_monotone=True) # Sqaured loss loss = tf.reduce_mean(tf.square(y - y_)) # Minimize! train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss) sess = tf.Session() sess.run(tf.global_variables_initializer()) # Iterate 100 times for _ in range(100): # Apply gradient. 
sess.run(train_op, feed_dict={x: x_data, y_: y_data}) # Then projection. sess.run(projection_op) # Fetching trained lattice parameter. lattice_param_val = sess.run(lattice_param) # Draw it! # You can see that the prediction does not decrease. lattice_surface(lattice_param_val[0]) """ Explanation: Train with monotonicity Now we'll set monotonicity in a lattice model. We'll use the same synthetic data generated by "XOR" function, but now we'll require full monotonicity in both directions, x1 and x2. Note that the data does not contain monotonicity, since "XOR" function value decreases, i.e., f(1, 0) > f(1, 1) and f(0, 1) > f(1, 1). So the trained model will do its best to fit the data while satisfying the monotonicity. End of explanation """ tf.reset_default_graph() x_data = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] y_data = [[0.0], [1.0], [1.0], [0.0]] x = tf.placeholder(dtype=tf.float32, shape=(None, 2)) y_ = tf.placeholder(dtype=tf.float32, shape=(None, 1)) # 2 x 2 lattice with 1 output. # lattice_param is [output_dim, 4] tensor. lattice_sizes = [2, 2] (y, lattice_param, projection_op, _) = tfl.lattice_layer( x, lattice_sizes=[2, 2], output_dim=1, is_monotone=[True, False]) # Sqaured loss loss = tf.reduce_mean(tf.square(y - y_)) # Minimize! train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss) sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer()) # Iterate 100 times for _ in range(100): # Apply gradient. sess.run(train_op, feed_dict={x: x_data, y_: y_data}) # Then projection. sess.run(projection_op) # Fetching trained lattice parameter. lattice_param_val = sess.run(lattice_param) # Draw it! # You can see that the prediction does not decrease in one direction. lattice_surface(lattice_param_val[0]) """ Explanation: Train with partial monotonicity Now we'll set partial monotonicity. Here only one input is constrained to be monotonic. 
End of explanation """ tf.reset_default_graph() x_data = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] y_data = [[0.0], [1.0], [1.0], [1.0]] x = tf.placeholder(dtype=tf.float32, shape=(None, 2)) y_ = tf.placeholder(dtype=tf.float32, shape=(None, 1)) # 2 x 2 lattice with 1 output. # lattice_param is [output_dim, 4] tensor. lattice_sizes = [2, 2] (y, lattice_param, _, _) = tfl.lattice_layer( x, lattice_sizes=[2, 2], output_dim=1) # Sqaured loss loss = tf.reduce_mean(tf.square(y - y_)) # Minimize! train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss) sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer()) # Iterate 100 times for _ in range(100): # Apply gradient. sess.run(train_op, feed_dict={x: x_data, y_: y_data}) # Fetching trained lattice parameter. lattice_param_val = sess.run(lattice_param) # Draw it! lattice_surface(lattice_param_val[0]) """ Explanation: Training OR function Now we switch to a synthetic dataset generated by "OR" function to illustrate other regularizers. f(0, 0) = 0 f(0, 1) = 1 f(1, 0) = 1 f(1, 1) = 1 End of explanation """ tf.reset_default_graph() x_data = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] y_data = [[0.0], [1.0], [1.0], [1.0]] x = tf.placeholder(dtype=tf.float32, shape=(None, 2)) y_ = tf.placeholder(dtype=tf.float32, shape=(None, 1)) # 2 x 2 lattice with 1 output. # lattice_param is [output_dim, 4] tensor. lattice_sizes = [2, 2] (y, lattice_param, _, regularization) = tfl.lattice_layer( x, lattice_sizes=[2, 2], output_dim=1, l2_laplacian_reg=[0.0, 1.0]) # Sqaured loss loss = tf.reduce_mean(tf.square(y - y_)) loss += regularization # Minimize! train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss) sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer()) # Iterate 100 times for _ in range(1000): # Apply gradient. sess.run(train_op, feed_dict={x: x_data, y_: y_data}) # Fetching trained lattice parameter. 
lattice_param_val = sess.run(lattice_param) # Draw it! # With heavy Laplacian regularization along the second axis, the second axis's slope becomes zero. lattice_surface(lattice_param_val[0]) """ Explanation: Laplacian regularizer Laplacian regularizer puts the penalty on lookup value changes. In other words, it tries to make the slope of each face as small as possible. End of explanation """ tf.reset_default_graph() x_data = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] y_data = [[0.0], [1.0], [1.0], [1.0]] x = tf.placeholder(dtype=tf.float32, shape=(None, 2)) y_ = tf.placeholder(dtype=tf.float32, shape=(None, 1)) # 2 x 2 lattice with 1 output. # lattice_param is [output_dim, 4] tensor. lattice_sizes = [2, 2] (y, lattice_param, _, regularization) = tfl.lattice_layer( x, lattice_sizes=[2, 2], output_dim=1, l2_torsion_reg=1.0) # Sqaured loss loss = tf.reduce_mean(tf.square(y - y_)) loss += regularization # Minimize! train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss) sess = tf.InteractiveSession() sess.run(tf.global_variables_initializer()) # Iterate 1000 times for _ in range(1000): # Apply gradient. sess.run(train_op, feed_dict={x: x_data, y_: y_data}) # Fetching trained lattice parameter. lattice_param_val = sess.run(lattice_param) # Draw it! # With heavy Torsion regularization, the model becomes a linear model. lattice_surface(lattice_param_val[0]) """ Explanation: Torsion regularizer Torsion regularizer penalizes nonlinear interactions in the feature. End of explanation """
LucaCanali/Miscellaneous
Spark_Physics/HEP_benchmark/ADL_HEP_Query_Benchmark_Q1_Q5_CERNSWAN_Version.ipynb
apache-2.0
# Start the Spark Session # When Using Spark on CERN SWAN, run this cell to get the Spark Session # Note: when running SWAN for this, do not select to connect to a CERN Spark cluster # If you want to use a cluster anyway, please copy the data to a cluster filesystem first from pyspark.sql import SparkSession spark = (SparkSession.builder .appName("HEP benchmark") .master("local[*]") .config("spark.driver.memory", "4g") .config("spark.sql.orc.enableNestedColumnVectorizedReader", "true") .getOrCreate() ) # Read data for the benchmark tasks # Further details of the available datasets at # https://github.com/LucaCanali/Miscellaneous/tree/master/Spark_Physics # this works from SWAN and CERN machines with eos mounted path = "/eos/project/s/sparkdltrigger/public/" input_data = "Run2012B_SingleMu_sample.orc" # use this if you downloaded the full dataset # input_data = "Run2012B_SingleMu.orc" df_events = spark.read.orc(path + input_data) df_events.printSchema() print(f"Number of events: {df_events.count()}") """ Explanation: HEP Benchmark Queries Q1 to Q5 - CERN SWAN Version This follows the IRIS-HEP benchmark and the article Evaluating Query Languages and Systems for High-Energy Physics Data and provides implementations of the benchmark tasks using Apache Spark. 
The workload and data: - Benchmark jobs are implemented follwing IRIS-HEP benchmark - The input data is a series of events from CMS opendata - The job output is typically a histogram - See also https://github.com/LucaCanali/Miscellaneous/tree/master/Spark_Physics Author and contact: Luca.Canali@cern.ch February, 2022 End of explanation """ # Compute the histogram for MET_pt # The Spark function "width_bucket" is used to generate the histogram bucket number # a groupBy operation with count is used to fill the histogram # The result is a histogram with bins value and counts foreach bin (N_events) min_val = 0 max_val = 100 num_bins = 100 step = (max_val - min_val) / num_bins histogram_data = ( df_events .selectExpr(f"width_bucket(MET_pt, {min_val}, {max_val}, {num_bins}) as bucket") .groupBy("bucket") .count() .orderBy("bucket") ) # convert bucket number to the corresponding value histogram_data = histogram_data.selectExpr(f"round({min_val} + (bucket - 1/2) * {step},2) as value", "count as N_events") # The action toPandas() here triggers the computation. # Histogram data is fetched into the driver as a Pandas Dataframe. %time histogram_data_pandas=histogram_data.toPandas() import matplotlib.pyplot as plt plt.style.use('seaborn-darkgrid') plt.rcParams.update({'font.size': 20, 'figure.figsize': [14,10]}) # cut the first and last bin x = histogram_data_pandas.iloc[1:-1]["value"] y = histogram_data_pandas.iloc[1:-1]["N_events"] # line plot f, ax = plt.subplots() ax.plot(x, y, '-') ax.set_xlim(min_val, max_val) ax.set_xlabel('$𝐸^{𝑚𝑖𝑠𝑠}_T$ (GeV)') ax.set_ylabel('Number of Events') ax.set_title("Distribution of $𝐸^{𝑚𝑖𝑠𝑠}_T$ ") plt.show() """ Explanation: Benchmark task: Q1 Plot the $𝐸^{𝑚𝑖𝑠𝑠}_T$ (missing transverse energy) of all events. 
End of explanation """ # Jet_pt contains arrays of jet measurements df_events.select("Jet_pt").show(5,False) # Use the explode function to extract array data into DataFrame rows df_events_jet_pt = df_events.selectExpr("explode(Jet_pt) as Jet_pt") df_events_jet_pt.printSchema() df_events_jet_pt.show(10, False) # Compute the histogram for Jet_pt # The Spark function "width_bucket" is used to generate the histogram bucket number # a groupBy operation with count is used to fill the histogram # The result is a histogram with bins value and counts foreach bin (N_events) min_val = 15 max_val = 60 num_bins = 100 step = (max_val - min_val) / num_bins histogram_data = ( df_events_jet_pt .selectExpr(f"width_bucket(Jet_pt, {min_val}, {max_val}, {num_bins}) as bucket") .groupBy("bucket") .count() .orderBy("bucket") ) # convert bucket number to the corresponding value histogram_data = histogram_data.selectExpr(f"round({min_val} + (bucket - 1/2) * {step},2) as value", "count as N_events") # The action toPandas() here triggers the computation. # Histogram data is fetched into the driver as a Pandas Dataframe. 
%time histogram_data_pandas=histogram_data.toPandas() import matplotlib.pyplot as plt plt.style.use('seaborn-darkgrid') plt.rcParams.update({'font.size': 20, 'figure.figsize': [14,10]}) # cut the first and last bin x = histogram_data_pandas.iloc[1:-1]["value"] y = histogram_data_pandas.iloc[1:-1]["N_events"] # line plot f, ax = plt.subplots() ax.plot(x, y, '-') ax.set_xlim(min_val, max_val) ax.set_xlabel('$p_T$ (GeV)') ax.set_ylabel('Number of Events') ax.set_title("Distribution of $p_T$ ") plt.show() """ Explanation: Benchmark task: Q2 Plot the $𝑝_𝑇$ (transverse momentum) of all jets in all events End of explanation """ # Take Jet arrays for pt and eta and transform them to rows with explode() df1 = df_events.selectExpr("explode(arrays_zip(Jet_pt, Jet_eta)) as Jet") df1.printSchema() df1.show(10, False) # Apply a filter on Jet_eta q3 = df1.select("Jet.Jet_pt").filter("abs(Jet.Jet_eta) < 1") q3.show(10,False) # Compute the histogram for Jet_pt # The Spark function "width_bucket" is used to generate the histogram bucket number # a groupBy operation with count is used to fill the histogram # The result is a histogram with bins value and counts foreach bin (N_events) min_val = 15 max_val = 60 num_bins = 100 step = (max_val - min_val) / num_bins histogram_data = ( q3 .selectExpr(f"width_bucket(Jet_pt, {min_val}, {max_val}, {num_bins}) as bucket") .groupBy("bucket") .count() .orderBy("bucket") ) # convert bucket number to the corresponding value histogram_data = histogram_data.selectExpr(f"round({min_val} + (bucket - 1/2) * {step},2) as value", "count as N_events") # The action toPandas() here triggers the computation. # Histogram data is fetched into the driver as a Pandas Dataframe. 
%time histogram_data_pandas=histogram_data.toPandas() import matplotlib.pyplot as plt plt.style.use('seaborn-darkgrid') plt.rcParams.update({'font.size': 20, 'figure.figsize': [14,10]}) # cut the first and last bin x = histogram_data_pandas.iloc[1:-1]["value"] y = histogram_data_pandas.iloc[1:-1]["N_events"] # line plot f, ax = plt.subplots() ax.plot(x, y, '-') ax.set_xlim(min_val, max_val) ax.set_xlabel('$p_T$ (GeV)') ax.set_ylabel('Number of Events') ax.set_title("Distribution of $p_T$ ") plt.show() """ Explanation: Benchmark task: Q3 Plot the $𝑝_𝑇$ of jets with |𝜂| < 1 (𝜂 is the jet pseudorapidity). End of explanation """ # This will use MET adn Jet_pt df_events.select("MET_pt","Jet_pt").show(10,False) # The filter ispushed inside arrays of Jet_pt # This use Spark's higher order functions for array processing q4 = df_events.select("MET_pt").where("cardinality(filter(Jet_pt, x -> x > 40)) > 1") q4.show(5,False) # compute the histogram for MET_pt min_val = 0 max_val = 100 num_bins = 100 step = (max_val - min_val) / num_bins histogram_data = ( q4 .selectExpr(f"width_bucket(MET_pt, {min_val}, {max_val}, {num_bins}) as bucket") .groupBy("bucket") .count() .orderBy("bucket") ) # convert bucket number to the corresponding value histogram_data = histogram_data.selectExpr(f"round({min_val} + (bucket - 1/2) * {step},2) as value", "count as N_events") # The action toPandas() here triggers the computation. # Histogram data is fetched into the driver as a Pandas Dataframe. 
%time histogram_data_pandas=histogram_data.toPandas() import matplotlib.pyplot as plt plt.style.use('seaborn-darkgrid') plt.rcParams.update({'font.size': 20, 'figure.figsize': [14,10]}) # cut the first and last bin x = histogram_data_pandas.iloc[1:-1]["value"] y = histogram_data_pandas.iloc[1:-1]["N_events"] # line plot f, ax = plt.subplots() ax.plot(x, y, '-') ax.set_xlim(min_val, max_val) ax.set_xlabel('$𝐸^{𝑚𝑖𝑠𝑠}_T$ (GeV)') ax.set_ylabel('Number of Events') ax.set_title("Distribution of $𝐸^{𝑚𝑖𝑠𝑠}_T$ ") plt.show() """ Explanation: Benchmark task: Q4 Plot the $𝐸^{𝑚𝑖𝑠𝑠}_𝑇$ of the events that have at least two jets with $𝑝_𝑇$ > 40 GeV (gigaelectronvolt). End of explanation """ # filter the events # select only events with 2 muons # the 2 muons must have opposite charge df_muons = df_events.filter("nMuon == 2").filter("Muon_charge[0] != Muon_charge[1]") # Formula for dimuon mass in pt, eta, phi, m coordinates # see also http://edu.itp.phys.ethz.ch/hs10/ppp1/2010_11_02.pdf # and https://en.wikipedia.org/wiki/Invariant_mass df_with_dimuonmass = df_muons.selectExpr("MET_pt",""" sqrt(2 * Muon_pt[0] * Muon_pt[1] * ( cosh(Muon_eta[0] - Muon_eta[1]) - cos(Muon_phi[0] - Muon_phi[1]) ) ) as Dimuon_mass """) # apply a filter on the dimuon mass Q5 = df_with_dimuonmass.filter("Dimuon_mass between 60 and 120") # compute the histogram for MET_pt min_val = 0 max_val = 100 num_bins = 100 step = (max_val - min_val) / num_bins histogram_data = ( Q5 .selectExpr(f"width_bucket(MET_pt, {min_val}, {max_val}, {num_bins}) as bucket") .groupBy("bucket") .count() .orderBy("bucket") ) # convert bucket number to the corresponding dimoun mass value histogram_data = histogram_data.selectExpr(f"round({min_val} + (bucket - 1/2) * {step},2) as value", "count as N_events") # The action toPandas() here triggers the computation. # Histogram data is fetched into the driver as a Pandas Dataframe. 
%time histogram_data_pandas=histogram_data.toPandas() import matplotlib.pyplot as plt plt.style.use('seaborn-darkgrid') plt.rcParams.update({'font.size': 20, 'figure.figsize': [14,10]}) # cut the first and last bin x = histogram_data_pandas.iloc[1:-1]["value"] y = histogram_data_pandas.iloc[1:-1]["N_events"] # line plot f, ax = plt.subplots() ax.plot(x, y, '-') ax.set_xlabel('$𝐸^{𝑚𝑖𝑠𝑠}_T$ (GeV)') ax.set_ylabel('Number of Events') ax.set_title("Distribution of $𝐸^{𝑚𝑖𝑠𝑠}_T$ ") plt.show() spark.stop() """ Explanation: Benchmark task: Q5 Plot the $𝐸^{𝑚𝑖𝑠𝑠}_T$ of events that have an opposite-charge muon pair with an invariant mass between 60 GeV and 120 GeV. End of explanation """
hvillanua/deep-learning
transfer-learning/Transfer_Learning_Solution.ipynb
mit
from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm vgg_dir = 'tensorflow_vgg/' # Make sure vgg exists if not isdir(vgg_dir): raise Exception("VGG directory doesn't exist!") class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile(vgg_dir + "vgg16.npy"): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar: urlretrieve( 'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy', vgg_dir + 'vgg16.npy', pbar.hook) else: print("Parameter file already exists!") """ Explanation: Transfer Learning Most of the time you won't want to train a whole convolutional network yourself. Modern ConvNets training on huge datasets like ImageNet take weeks on multiple GPUs. Instead, most people use a pretrained network either as a fixed feature extractor, or as an initial network to fine tune. In this notebook, you'll be using VGGNet trained on the ImageNet dataset as a feature extractor. Below is a diagram of the VGGNet architecture. <img src="assets/cnnarchitecture.jpg" width=700px> VGGNet is great because it's simple and has great performance, coming in second in the ImageNet competition. The idea here is that we keep all the convolutional layers, but replace the final fully connected layers with our own classifier. This way we can use VGGNet as a feature extractor for our images then easily train a simple classifier on top of that. What we'll do is take the first fully connected layer with 4096 units, including thresholding with ReLUs. We can use those values as a code for each image, then build a classifier on top of those codes. You can read more about transfer learning from the CS231n course notes. Pretrained VGGNet We'll be using a pretrained network from https://github.com/machrisaa/tensorflow-vgg. 
This is a really nice implementation of VGGNet, quite easy to work with. The network has already been trained and the parameters are available from this link. End of explanation """ import tarfile dataset_folder_path = 'flower_photos' class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile('flower_photos.tar.gz'): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar: urlretrieve( 'http://download.tensorflow.org/example_images/flower_photos.tgz', 'flower_photos.tar.gz', pbar.hook) if not isdir(dataset_folder_path): with tarfile.open('flower_photos.tar.gz') as tar: tar.extractall() tar.close() """ Explanation: Flower power Here we'll be using VGGNet to classify images of flowers. To get the flower dataset, run the cell below. This dataset comes from the TensorFlow inception tutorial. End of explanation """ import os import numpy as np import tensorflow as tf from tensorflow_vgg import vgg16 from tensorflow_vgg import utils data_dir = 'flower_photos/' contents = os.listdir(data_dir) classes = [each for each in contents if os.path.isdir(data_dir + each)] """ Explanation: ConvNet Codes Below, we'll run through all the images in our dataset and get codes for each of them. That is, we'll run the images through the VGGNet convolutional layers and record the values of the first fully connected layer. We can then write these to a file for later when we build our own classifier. Here we're using the vgg16 module from tensorflow_vgg. The network takes images of size $244 \times 224 \times 3$ as input. Then it has 5 sets of convolutional layers. 
The network implemented here has this structure (copied from the source code: ``` self.conv1_1 = self.conv_layer(bgr, "conv1_1") self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2") self.pool1 = self.max_pool(self.conv1_2, 'pool1') self.conv2_1 = self.conv_layer(self.pool1, "conv2_1") self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2") self.pool2 = self.max_pool(self.conv2_2, 'pool2') self.conv3_1 = self.conv_layer(self.pool2, "conv3_1") self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2") self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3") self.pool3 = self.max_pool(self.conv3_3, 'pool3') self.conv4_1 = self.conv_layer(self.pool3, "conv4_1") self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2") self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3") self.pool4 = self.max_pool(self.conv4_3, 'pool4') self.conv5_1 = self.conv_layer(self.pool4, "conv5_1") self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2") self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3") self.pool5 = self.max_pool(self.conv5_3, 'pool5') self.fc6 = self.fc_layer(self.pool5, "fc6") self.relu6 = tf.nn.relu(self.fc6) ``` So what we want are the values of the first fully connected layer, after being ReLUd (self.relu6). To build the network, we use with tf.Session() as sess: vgg = vgg16.Vgg16() input_ = tf.placeholder(tf.float32, [None, 224, 224, 3]) with tf.name_scope("content_vgg"): vgg.build(input_) This creates the vgg object, then builds the graph with vgg.build(input_). 
Then to get the values from the layer, feed_dict = {input_: images} codes = sess.run(vgg.relu6, feed_dict=feed_dict) End of explanation """ # Set the batch size higher if you can fit in in your GPU memory batch_size = 10 codes_list = [] labels = [] batch = [] codes = None with tf.Session() as sess: vgg = vgg16.Vgg16() input_ = tf.placeholder(tf.float32, [None, 224, 224, 3]) with tf.name_scope("content_vgg"): vgg.build(input_) for each in classes: print("Starting {} images".format(each)) class_path = data_dir + each files = os.listdir(class_path) for ii, file in enumerate(files, 1): # Add images to the current batch # utils.load_image crops the input images for us, from the center img = utils.load_image(os.path.join(class_path, file)) batch.append(img.reshape((1, 224, 224, 3))) labels.append(each) # Running the batch through the network to get the codes if ii % batch_size == 0 or ii == len(files): images = np.concatenate(batch) feed_dict = {input_: images} codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict) # Here I'm building an array of the codes if codes is None: codes = codes_batch else: codes = np.concatenate((codes, codes_batch)) # Reset to start building the next batch batch = [] print('{} images processed'.format(ii)) # write codes to file with open('codes', 'w') as f: codes.tofile(f) # write labels to file import csv with open('labels', 'w') as f: writer = csv.writer(f, delimiter='\n') writer.writerow(labels) """ Explanation: Below I'm running images through the VGG network in batches. End of explanation """ # read codes and labels from file import csv with open('labels') as f: reader = csv.reader(f, delimiter='\n') labels = np.array([each for each in reader if len(each) > 0]).squeeze() with open('codes') as f: codes = np.fromfile(f, dtype=np.float32) codes = codes.reshape((len(labels), -1)) """ Explanation: Building the Classifier Now that we have codes for all the images, we can build a simple classifier on top of them. 
The codes behave just like normal input into a simple neural network. Below I'm going to have you do most of the work. End of explanation """ from sklearn.preprocessing import LabelBinarizer lb = LabelBinarizer() lb.fit(labels) labels_vecs = lb.transform(labels) """ Explanation: Data prep As usual, now we need to one-hot encode our labels and create validation/test sets. First up, creating our labels! Exercise: From scikit-learn, use LabelBinarizer to create one-hot encoded vectors from the labels. End of explanation """ from sklearn.model_selection import StratifiedShuffleSplit ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2) train_idx, val_idx = next(ss.split(codes, labels)) half_val_len = int(len(val_idx)/2) val_idx, test_idx = val_idx[:half_val_len], val_idx[half_val_len:] train_x, train_y = codes[train_idx], labels_vecs[train_idx] val_x, val_y = codes[val_idx], labels_vecs[val_idx] test_x, test_y = codes[test_idx], labels_vecs[test_idx] print("Train shapes (x, y):", train_x.shape, train_y.shape) print("Validation shapes (x, y):", val_x.shape, val_y.shape) print("Test shapes (x, y):", test_x.shape, test_y.shape) """ Explanation: Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same the distribution of classes as it is for the whole data set. The easiest way to accomplish both these goals is to use StratifiedShuffleSplit from scikit-learn. You can create the splitter like so: ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2) Then split the data with splitter = ss.split(x, y) ss.split returns a generator of indices. You can pass the indices into the arrays to get the split sets. 
The fact that it's a generator means you either need to iterate over it, or use next(splitter) to get the indices. Be sure to read the documentation and the user guide. Exercise: Use StratifiedShuffleSplit to split the codes and labels into training, validation, and test sets. End of explanation """ inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]]) labels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]]) fc = tf.contrib.layers.fully_connected(inputs_, 256) logits = tf.contrib.layers.fully_connected(fc, labels_vecs.shape[1], activation_fn=None) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels_, logits=logits) cost = tf.reduce_mean(cross_entropy) optimizer = tf.train.AdamOptimizer().minimize(cost) predicted = tf.nn.softmax(logits) correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) """ Explanation: If you did it right, you should see these sizes for the training sets: Train shapes (x, y): (2936, 4096) (2936, 5) Validation shapes (x, y): (367, 4096) (367, 5) Test shapes (x, y): (367, 4096) (367, 5) Classifier layers Once you have the convolutional codes, you just need to build a classfier from some fully connected layers. You use the codes as the inputs and the image labels as targets. Otherwise the classifier is a typical neural network. Exercise: With the codes and labels loaded, build the classifier. Consider the codes as your inputs, each of them are 4096D vectors. You'll want to use a hidden layer and an output layer as your classifier. Remember that the output layer needs to have one unit for each class and a softmax activation function. Use the cross entropy to calculate the cost. End of explanation """ def get_batches(x, y, n_batches=10): """ Return a generator that yields batches from arrays x and y. 
""" batch_size = len(x)//n_batches for ii in range(0, n_batches*batch_size, batch_size): # If we're not on the last batch, grab data with size batch_size if ii != (n_batches-1)*batch_size: X, Y = x[ii: ii+batch_size], y[ii: ii+batch_size] # On the last batch, grab the rest of the data else: X, Y = x[ii:], y[ii:] # I love generators yield X, Y """ Explanation: Batches! Here is just a simple way to do batches. I've written it so that it includes all the data. Sometimes you'll throw out some data at the end to make sure you have full batches. Here I just extend the last batch to include the remaining data. End of explanation """ epochs = 10 iteration = 0 saver = tf.train.Saver() with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for e in range(epochs): for x, y in get_batches(train_x, train_y): feed = {inputs_: x, labels_: y} loss, _ = sess.run([cost, optimizer], feed_dict=feed) print("Epoch: {}/{}".format(e+1, epochs), "Iteration: {}".format(iteration), "Training loss: {:.5f}".format(loss)) iteration += 1 if iteration % 5 == 0: feed = {inputs_: val_x, labels_: val_y} val_acc = sess.run(accuracy, feed_dict=feed) print("Epoch: {}/{}".format(e, epochs), "Iteration: {}".format(iteration), "Validation Acc: {:.4f}".format(val_acc)) saver.save(sess, "checkpoints/flowers.ckpt") """ Explanation: Training Here, we'll train the network. Exercise: So far we've been providing the training code for you. Here, I'm going to give you a bit more of a challenge and have you write the code to train the network. Of course, you'll be able to see my solution if you need help. End of explanation """ with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) feed = {inputs_: test_x, labels_: test_y} test_acc = sess.run(accuracy, feed_dict=feed) print("Test accuracy: {:.4f}".format(test_acc)) %matplotlib inline import matplotlib.pyplot as plt from scipy.ndimage import imread """ Explanation: Testing Below you see the test accuracy. 
You can also see the predictions returned for images. End of explanation """ test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg' test_img = imread(test_img_path) plt.imshow(test_img) # Run this cell if you don't have a vgg graph built with tf.Session() as sess: input_ = tf.placeholder(tf.float32, [None, 224, 224, 3]) vgg = vgg16.Vgg16() vgg.build(input_) with tf.Session() as sess: img = utils.load_image(test_img_path) img = img.reshape((1, 224, 224, 3)) feed_dict = {input_: img} code = sess.run(vgg.relu6, feed_dict=feed_dict) saver = tf.train.Saver() with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) feed = {inputs_: code} prediction = sess.run(predicted, feed_dict=feed).squeeze() plt.imshow(test_img) plt.barh(np.arange(5), prediction) _ = plt.yticks(np.arange(5), lb.classes_) """ Explanation: Below, feel free to choose images and see how the trained classifier predicts the flowers in them. End of explanation """
kmclaugh/fastai_courses
ai-playground/Keras_Linear_Regression_Example.ipynb
apache-2.0
%matplotlib inline import pandas as pd import numpy as np import seaborn as sns from keras.layers import Dense from keras.models import Model, Sequential from keras import initializers """ Explanation: Keras Model for a Simple Linear Function In this notebook, I've created a simple Keras model to approximate a linear function. I am new to Keras and deep learning in general and this exercise really helped me understand what's happening in a Keras neural network model. Trying to predict a simple linear function like this with a neural network is, of course, overkill. But using a linear function makes it easy to see how different aspects of a Keras model, like the learning rate, input normalization, stochastic gradient descent, and dataset size, affect the performance of the model, without getting confused by image processing or other concepts necessary to understand other neural network examples. Plot inline and import all necessary libraries and functions End of explanation """ ## Set the mean, standard deviation, and size of the dataset, respectively mu, sigma, size = 0, 4, 100 ## Set the slope (m) and y-intercept (b), respectively m, b = 2, 100 ## Create a uniformally distributed set of X values between 0 and 10 and store in pandas dataframe x = np.random.uniform(0,10, size) df = pd.DataFrame({'x':x}) ## Find the "perfect" y value corresponding to each x value given df['y_perfect'] = df['x'].apply(lambda x: m*x+b) ## Create some noise and add it to each "perfect" y value to create a realistic y dataset df['noise'] = np.random.normal(mu, sigma, size=(size,)) df['y'] = df['y_perfect']+df['noise'] ## Plot our noisy dataset with a standard linear regression ## (note seaborn, the plotting library, does the linear regression by default) ax1 = sns.regplot(x='x', y='y', data=df) """ Explanation: Create a dataset that approximates a linear function with some noise End of explanation """ from keras.callbacks import Callback class PrintAndSaveWeights(Callback): """ Print and 
save the weights after each epoch. """ def on_train_begin(self, logs={}): """ Create our weights history list when we begin training """ self.weights_history = {"m":[], "b":[]} def on_epoch_end(self, batch, logs={}): """ At the end of every epoch, save and print our slope and intercept weights """ ## Get the current weights current_m = self.model.layers[-1].get_weights()[0][0][0] current_b = self.model.layers[-1].get_weights()[1][0] ## Save them to hour history object self.weights_history['m'].append(current_m) self.weights_history['b'].append(current_b) ## Print them after each epoch print "\nm=%.2f b=%.2f\n" % (current_m, current_b) ## Initialize our callback function for use in the model later print_save_weights = PrintAndSaveWeights() """ Explanation: Create a callback function so we can track the progress of our predictions through epochs End of explanation """ ## Create our model with a single dense layer, with a linear activation function and glorot (Xavier) input normalization model = Sequential([ Dense(1, activation='linear', input_shape=(1,), kernel_initializer='glorot_uniform') ]) ## Compile our model using the method of least squares (mse) loss function ## and a stochastic gradient descent (sgd) optimizer model.compile(loss='mse', optimizer='sgd') ## To try our model with an Adam optimizer simple replace 'sgd' with 'Adam' ## Set our learning rate to 0.01 and print it model.optimizer.lr.set_value(.001) print model.optimizer.lr.get_value() ## Fit our model to the noisy data we create above. 
Notes: ## The validation split parameter reserves 20% of our data for validation (ie 80% will be used for training) ## The callback parameter is where we tell our model to use the callback function created above ## I don't really know if using a batch size of 1 makes sense history = model.fit(x=df['x'], y=df['y'], validation_split=0.2, batch_size=1, epochs=100, callbacks=[print_save_weights]) ## As the model is fitting the data you can watch below and see how our m and b parameters are improving ## Save and print our final weights predicted_m = model.get_weights()[0][0][0] predicted_b = model.get_weights()[1][0] print "\nm=%.2f b=%.2f\n" % (predicted_m, predicted_b) """ Explanation: Create our Keras model to approximate our linear function The goal of our model will be to find the weights that best predict the outputs, given the inputs. In our simple linear, example the weights are the slope (m) and y-intercept (b) of our line. To do so, we are using single "dense" or "fully connected layer" with a 'linear' activation function. To get a feel for how models work I tried a few different things: 1. I tried running the model with and without kernel initialization (eg Glorot or Xavier input normalization) 2. I changed the number of epochs 3. I changed the learning rate 4. I changed the amount of data (by adjusting the "size" parameter in the dataset creation cell 5. 
I changed the optimizer to 'Adam' End of explanation """ import matplotlib.pyplot as plt plt.plot(print_save_weights.weights_history['m']) plt.plot(print_save_weights.weights_history['b']) plt.title('Predicted Weights') plt.ylabel('weights') plt.xlabel('epoch') plt.legend(['m', 'b'], loc='upper left') plt.show() """ Explanation: Plot our model's slope (m) and y-intercept (b) guesses over each epoch Seeing this plot really helped me understand how the model improves its guesses over each epoch End of explanation """ plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'valid'], loc='upper right') plt.show() """ Explanation: Plot our model's loss function over time Seeing this plot really helped me understand how the model is improving its loss over each epoch. End of explanation """ ## Create our predicted y's based on the model df['y_predicted'] = df['x'].apply(lambda x: predicted_m*x + predicted_b) ## Plot the original data with a standard linear regression ax1 = sns.regplot(x='x', y='y', data=df, label='real') ## Plot our predicted line based on our Keras model's slope and y-intercept ax2 = sns.regplot(x='x', y='y_predicted', data=df, scatter=False, label='predicted') ax2.legend(loc="upper left") """ Explanation: Plot our model's prediction over the data and real line End of explanation """
aschaffn/phys202-2015-work
assignments/assignment04/MatplotlibEx02.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt import numpy as np """ Explanation: Matplotlib Exercise 2 Imports End of explanation """ !head -n 30 open_exoplanet_catalogue.txt """ Explanation: Exoplanet properties Over the past few decades, astronomers have discovered thousands of extrasolar planets. The following paper describes the properties of some of these planets. http://iopscience.iop.org/1402-4896/2008/T130/014001 Your job is to reproduce Figures 2 and 4 from this paper using an up-to-date dataset of extrasolar planets found on this GitHub repo: https://github.com/OpenExoplanetCatalogue/open_exoplanet_catalogue A text version of the dataset has already been put into this directory. The top of the file has documentation about each column of data: End of explanation """ data = np.genfromtxt('open_exoplanet_catalogue.txt', delimiter=',', comments = '#') assert data.shape==(1993,24) """ Explanation: Use np.genfromtxt with a delimiter of ',' to read the data into a NumPy array called data: End of explanation """ import math # get rid of the missing values - they seem to cause problems with hist complete = [x for x in data[:,2] if not math.isnan(x)] max(complete) # plt.hist(complete, bins=50) f = plt.figure(figsize = (6,4)) plt.hist(complete, bins = 25, range = (0,20)) plt.xlabel("Planetary Masses (Jupiter Units)") plt.ylabel("Frequency") plt.title("Distribution of Planetary masses less than 20 Jupiter units\n") assert True # leave for grading """ Explanation: Make a histogram of the distribution of planetary masses. This will reproduce Figure 2 in the original paper. Customize your plot to follow Tufte's principles of visualizations. Customize the box, grid, spines and ticks to match the requirements of this data. Pick the number of bins for the histogram appropriately. 
End of explanation """ y = data[:,6] x = data[:,5] # plt.scatter(x,y) plt.semilogx(x,y,'bo', alpha = .2, ms = 4) plt.xlabel("Semi-Major Axis (AU)") plt.ylabel("Orbital Eccentricity") plt.title("Orbital Eccentricity veruss Semi-Major Axis\n") plt.scatter(x,np.log(y)) assert True # leave for grading """ Explanation: Make a scatter plot of the orbital eccentricity (y) versus the semimajor axis. This will reproduce Figure 4 of the original paper. Use a log scale on the x axis. Customize your plot to follow Tufte's principles of visualizations. Customize the box, grid, spines and ticks to match the requirements of this data. End of explanation """
statsmodels/statsmodels.github.io
v0.13.0/examples/notebooks/generated/theta-model.ipynb
bsd-3-clause
import matplotlib.pyplot as plt import numpy as np import pandas as pd import pandas_datareader as pdr import seaborn as sns plt.rc("figure", figsize=(16, 8)) plt.rc("font", size=15) plt.rc("lines", linewidth=3) sns.set_style("darkgrid") """ Explanation: The Theta Model The Theta model of Assimakopoulos & Nikolopoulos (2000) is a simple method for forecasting the involves fitting two $\theta$-lines, forecasting the lines using a Simple Exponential Smoother, and then combining the forecasts from the two lines to produce the final forecast. The model is implemented in steps: Test for seasonality Deseasonalize if seasonality detected Estimate $\alpha$ by fitting a SES model to the data and $b_0$ by OLS. Forecast the series Reseasonalize if the data was deseasonalized. The seasonality test examines the ACF at the seasonal lag $m$. If this lag is significantly different from zero then the data is deseasonalize using statsmodels.tsa.seasonal_decompose use either a multiplicative method (default) or additive. The parameters of the model are $b_0$ and $\alpha$ where $b_0$ is estimated from the OLS regression $$ X_t = a_0 + b_0 (t-1) + \epsilon_t $$ and $\alpha$ is the SES smoothing parameter in $$ \tilde{X}t = (1-\alpha) X_t + \alpha \tilde{X}{t-1} $$ The forecasts are then $$ \hat{X}{T+h|T} = \frac{\theta-1}{\theta} \hat{b}_0 \left[h - 1 + \frac{1}{\hat{\alpha}} - \frac{(1-\hat{\alpha})^T}{\hat{\alpha}} \right] + \tilde{X}{T+h|T} $$ Ultimately $\theta$ only plays a role in determining how much the trend is damped. If $\theta$ is very large, then the forecast of the model is identical to that from an Integrated Moving Average with a drift, $$ X_t = X_{t-1} + b_0 + (\alpha-1)\epsilon_{t-1} + \epsilon_t. $$ Finally, the forecasts are reseasonalized if needed. This module is based on: Assimakopoulos, V., & Nikolopoulos, K. (2000). The theta model: a decomposition approach to forecasting. International journal of forecasting, 16(4), 521-530. Hyndman, R. J., & Billah, B. 
(2003). Unmasking the Theta method. International Journal of Forecasting, 19(2), 287-290. Fioruci, J. A., Pellegrini, T. R., Louzada, F., & Petropoulos, F. (2015). The optimized theta method. arXiv preprint arXiv:1503.03529. Imports We start with the standard set of imports and some tweaks to the default matplotlib style. End of explanation """ reader = pdr.fred.FredReader(["HOUST"], start="1980-01-01", end="2020-04-01") data = reader.read() housing = data.HOUST housing.index.freq = housing.index.inferred_freq ax = housing.plot() """ Explanation: Load some Data We will first look at housing starts using US data. This series is clearly seasonal but does not have a clear trend during the same. End of explanation """ from statsmodels.tsa.forecasting.theta import ThetaModel tm = ThetaModel(housing) res = tm.fit() print(res.summary()) """ Explanation: We fit specify the model without any options and fit it. The summary shows that the data was deseasonalized using the multiplicative method. The drift is modest and negative, and the smoothing parameter is fairly low. End of explanation """ forecasts = {"housing": housing} for year in range(1995, 2020, 2): sub = housing[: str(year)] res = ThetaModel(sub).fit() fcast = res.forecast(24) forecasts[str(year)] = fcast forecasts = pd.DataFrame(forecasts) ax = forecasts["1995":].plot(legend=False) children = ax.get_children() children[0].set_linewidth(4) children[0].set_alpha(0.3) children[0].set_color("#000000") ax.set_title("Housing Starts") plt.tight_layout(pad=1.0) """ Explanation: The model is first and foremost a forecasting method. Forecasts are produced using the forecast method from fitted model. Below we produce a hedgehog plot by forecasting 2-years ahead every 2 years. Note: the default $\theta$ is 2. End of explanation """ tm = ThetaModel(np.log(housing), method="additive") res = tm.fit(use_mle=True) print(res.summary()) """ Explanation: We could alternatively fit the log of the data. 
Here it makes more sense to force the deseasonalizing to use the additive method, if needed. We also fit the model parameters using MLE. This method fits the IMA $$ X_t = X_{t-1} + \gamma\epsilon_{t-1} + \epsilon_t $$ where $\hat{\alpha}$ = $\min(\hat{\gamma}+1, 0.9998)$ using statsmodels.tsa.SARIMAX. The parameters are similar although the drift is closer to zero. End of explanation """ res.forecast_components(12) """ Explanation: The forecast only depends on the forecast trend component, $$ \hat{b}_0 \left[h - 1 + \frac{1}{\hat{\alpha}} - \frac{(1-\hat{\alpha})^T}{\hat{\alpha}} \right], $$ the forecast from the SES (which does not change with the horizon), and the seasonal. These three components are available using the forecast_components. This allows forecasts to be constructed using multiple choices of $\theta$ using the weight expression above. End of explanation """ reader = pdr.fred.FredReader(["NA000349Q"], start="1980-01-01", end="2020-04-01") pce = reader.read() pce.columns = ["PCE"] pce.index.freq = "QS-OCT" _ = pce.plot() """ Explanation: Personal Consumption Expenditure We next look at personal consumption expenditure. This series has a clear seasonal component and a drift. End of explanation """ mod = ThetaModel(np.log(pce)) res = mod.fit() print(res.summary()) """ Explanation: Since this series is always positive, we model the $\ln$. End of explanation """ forecasts = pd.DataFrame( { "ln PCE": np.log(pce.PCE), "theta=1.2": res.forecast(12, theta=1.2), "theta=2": res.forecast(12), "theta=3": res.forecast(12, theta=3), "No damping": res.forecast(12, theta=np.inf), } ) _ = forecasts.tail(36).plot() plt.title("Forecasts of ln PCE") plt.tight_layout(pad=1.0) """ Explanation: Next we explore differenced in the forecast as $\theta$ changes. When $\theta$ is close to 1, the drift is nearly absent. As $\theta$ increases, the drift becomes more obvious. 
End of explanation """ ax = res.plot_predict(24, theta=2) """ Explanation: Finally, plot_predict can be used to visualize the predictions and prediction intervals which are constructed assuming the IMA is true. End of explanation """ ln_pce = np.log(pce.PCE) forecasts = {"ln PCE": ln_pce} for year in range(1995, 2020, 3): sub = ln_pce[: str(year)] res = ThetaModel(sub).fit() fcast = res.forecast(12) forecasts[str(year)] = fcast forecasts = pd.DataFrame(forecasts) ax = forecasts["1995":].plot(legend=False) children = ax.get_children() children[0].set_linewidth(4) children[0].set_alpha(0.3) children[0].set_color("#000000") ax.set_title("ln PCE") plt.tight_layout(pad=1.0) """ Explanation: We conclude be producing a hedgehog plot using 2-year non-overlapping samples. End of explanation """
ucsdlib/python-novice-inflammation
1-intro-to-numpy-short.ipynb
cc0-1.0
import numpy """ Explanation: Analyzing patient data Words are useful, but what’s more useful are the sentences and stories we build with them. A lot of powerful tools are built into languages like Python, even more live in the libraries they are used to build We need to import a library called NumPy Use this library to do fancy things with numbers (e.g. if you have matrices or arrays). End of explanation """ #assuming the data file is in the data/ folder numpy.loadtxt(fname='data/inflammation-01.csv', delimiter=',') """ Explanation: Importing a library akin to getting lab equipment out of a locker and setting up on bench Libraries provide additional functionality With NumPy loaded we can read the CSV into python. End of explanation """ data = numpy.loadtxt(fname='data/inflammation-01.csv', delimiter=',') print(data) weight_kg = 55 #assigns value 55 to weight_kg print(weight_kg) #we can print to the screen print("weight in kg", weight_kg) weight_kg = 70 print("weight in kg", weight_kg) """ Explanation: numpy.loadtex() is a function call, runs loadtxt in numpy uses dot notation to access thing.component two parameters: filename and delimiter - both character strings (") we didn't save in memory using a variable variables in python must start with letter & are case sensitive assignment operator is = let's look at assigning this inflammation data to a variable End of explanation """ weight_kg * 2 weight_lb = weight_kg * 2.2 print('weigh in lb:', weight_lb) print("weight in lb:", weight_kg*2.2) print(data) """ Explanation: print above shows several things at once by separating with commas variable as putting sticky note on value means assigning a value to one variable does not chage the value of other variables. End of explanation """ whos """ Explanation: whos #ipython command to see what variables & mods you have End of explanation """ print(data) print(type(data)) #we can get type of object """ Explanation: What does the following program print out? 
python first, second = 'Grace', 'Hopper' third, fourth = second, first print(third, fourth) End of explanation """ print(data.shape) """ Explanation: data refers to N-dimensional array data corres. to patients' inflammation let's look at the shape of the data End of explanation """ print('first value in data', data[0,0]) #use index in square brackets print('4th value in data', data[0,3]) #use index in square brackets print('first value in 3rd row data', data[3,0]) #use index in square brackets !head -3 data/inflammation-01.csv print('middle value in data', data[30,20]) # get the middle value - notice here i didn't use print """ Explanation: data has 60 rows and 40 columns when we created data with numpy it also creates members or attributes extra info describes data like adjective does a noun dot notation to access members End of explanation """ data[0:4, 0:10] #select whole sections of matrix, 1st 10 days & 4 patients """ Explanation: programming languages like MATLAB and R start counting at 1 languages in C family (C++, Java, Perl & python) we have MxN array in python, indices go from 0 to M-1 on the first axis and 0 to N-1 on second indices are (row, column) End of explanation """ data[5:10,0:10] """ Explanation: slice 0:4 means start at 0 and go up to but not include 4 up-to-but-not-including takes a bit of getting used to End of explanation """ data[:3, 36:] """ Explanation: dont' have to include uper and lower bound python uses 0 by default if we don't include lower no upper slice runs to the axis : will include everything End of explanation """ element = 'oxygen' print('first three characters:', element[0:3]) print('last three characters:', element[3:6]) print(element[:4]) print(element[4:]) print(:) #oxygen print(element[-1]) print(element[-2]) print(element[2:-1]) doubledata = data * 2.0 #we can perform math on array """ Explanation: A section of an array is called a slice. 
We can take slices of character strings as well: python element = 'oxygen' print('first three characters:', element[0:3]) print('last three characters:', element[3:6]) first three characters: oxy last three characters: gen What is the value of element[:4]? What about element[4:]? Or element[:]? What is element[-1]? What is element[-2]? Given those answers, explain what element[1:-1] does. End of explanation """ doubledata data[:3, 36:] doubledata[:3, 36:] """ Explanation: operation on arrays is done on each individual element of the array End of explanation """ tripledata = doubledata + data print('tripledata:') print(tripledata[:3, 36:]) """ Explanation: we can also do arithmetic operation with another array of same shape (same dims) End of explanation """ print(data.mean()) """ Explanation: we can do more than simple arithmetic let's take average inflammation for patients End of explanation """ print('maximum inflammation: ', data.max()) print('minimum inflammation: ', data.min()) print('standard deviation:', data.std()) """ Explanation: mean is a method of the array (function) variables are nouns, methods are verbs - they are what the thing knows how to do for mean we need empty () parense even if we aren't passing in parameters to tell python to go do something data.shape doesn't need () because it's just a description NumPy arrays have lots of useful methods: End of explanation """ %matplotlib inline import matplotlib.pyplot as plt data """ Explanation: however, we are usually more interested in partial stats, e.g. 
max value per patient or the avg value per day we can create a new subset array of the data we want End of explanation """ plt.imshow(data) image = plt.imshow(data) plt.savefig('timsheatmap.png') """ Explanation: let's visualize this data with matplotlib library first we import the plyplot module from matplotlib End of explanation """ avg_inflam = data.mean(axis=0) #asix zero is by each day print(data.mean(axis=0)) print(data.mean(axis=0).shape) #Nx1 vector of averages print(data.mean(axis=1)) #avg inflam per patient across all days print(data.mean(axis=1).shape) """ Explanation: nice, but ipython/jupyter proved us with 'magic' functions and one lets us display our plot inline % indicates an ipython magic function what if we need max inflammation for all patients, or the average for each day? most array methods let us specify the axis we want to work on End of explanation """ print(avg_inflam) day_avg_plot = plt.plot(avg_inflam) """ Explanation: now let's look at avg inflammation over days (columns) End of explanation """ data.mean(axis=0).shape data.shape data.mean(axis=1).shape max_plot = plt.plot(data.max(axis=0)) """ Explanation: avg per day across all patients in the var day_avg_plot matplotlib create and display a line graph of those values End of explanation """
angelmtenor/data-science-keras
simple_stock_prediction.ipynb
mit
%matplotlib inline import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import keras import helper #helper.reproducible(seed=42) sns.set() """ Explanation: Simple Stock Prediction Predicting Alphabet Inc. stock price using a Recurrent Neural Network Dataset from Google Finance Based on RNN project: Time Series Prediction project of the Udacity's Artificial Intelligence Nanodegree End of explanation """ df = pd.read_csv('data/alphabet_stock.csv') print("Days: {}".format(len(df))) df.head(3) # Parse dates df['Date'] = pd.to_datetime(df['Date'], format='%d-%b-%y', errors='coerce') df.head(3) # Sort values by date df = df.sort_values(by='Date') df.plot(x='Date', y='Close') plt.ylabel("Share Price ($)"); # Only Close prices will be used data = df['Close'].values # Normalize data from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler(feature_range=(-1, 1), copy=True) data = scaler.fit_transform(data.reshape(-1, 1)) data.reshape(-1); """ Explanation: Load and process the data End of explanation """ def window_transform_series(series, window_size): # containers for input/output pairs X = [] y = [] # fill input/output lists from the given sequence for i in range(len(series) - window_size): X.append(series[i:i + window_size]) # size: sliding window y.append(series[i + window_size]) # size: scalar # reshape each X = np.asarray(X) X.shape = (np.shape(X)[0:2]) y = np.asarray(y) y.shape = (len(y), 1) return X, y # window the data window_size = 28 # Four weeks X, y = window_transform_series(series=data, window_size=window_size) """ Explanation: Split into sequences Run a sliding window along the input series and creates associated input/output pairs End of explanation """ # split the dataset into training / test sets train_test_split = int(len(X) * 0.9) X_train = X[:train_test_split, :] y_train = y[:train_test_split] # keep the last chunk for testing X_test = X[train_test_split:, :] y_test = y[train_test_split:] # 
Keras's LSTM module requires input reshaped to [samples, window size, stepsize] X_train = np.asarray(np.reshape(X_train, (X_train.shape[0], window_size, 1))) X_test = np.asarray(np.reshape(X_test, (X_test.shape[0], window_size, 1))) """ Explanation: Split into training and test sets Dataset is not randomly split as the input/output pairs are related temporally End of explanation """ from keras.models import Sequential from keras.layers import Dense, LSTM model = Sequential() model.add(LSTM(256, input_shape=(window_size, 1))) model.add(Dense(1, activation=None)) model.summary() model.compile(loss='mean_squared_error', optimizer='rmsprop') # train the model print("\nTraining ...") callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, verbose=0)] %time history = model.fit(X_train, y_train, epochs=100, batch_size=1024, verbose=0, \ validation_split=0.2, callbacks=callbacks) helper.show_training(history) model_path = os.path.join("models", "simple_stock_prediction.h5") model.save(model_path) print("\nModel saved at", model_path) """ Explanation: Recurrent Neural Network End of explanation """ model = keras.models.load_model(model_path) print("Model loaded:", model_path) # print training and testing errors training_error = model.evaluate(X_train, y_train, verbose=0) print('\nTraining error = {:.4f}'.format(training_error)) testing_error = model.evaluate(X_test, y_test, verbose=0) print('Test error = {:.4f}'.format(testing_error)) train_predict = model.predict(X_train) test_predict = model.predict(X_test) # revert scaling data_or = scaler.inverse_transform(data) train_predict_or = scaler.inverse_transform(train_predict) test_predict_or = scaler.inverse_transform(test_predict) plt.figure(figsize=(14, 6)) # plot original series plt.plot(data_or, color='k') # plot training set prediction split_pt = train_test_split + window_size plt.plot(np.arange(window_size, split_pt, 1), train_predict_or, color='b') # plot testing set prediction 
plt.plot(np.arange(split_pt, split_pt + len(test_predict_or), 1), test_predict_or, color='r') plt.xlabel('day') plt.ylabel('stock price') plt.legend( ['original series', 'training fit', 'testing fit'], loc='center left', bbox_to_anchor=(1, 0.5)); """ Explanation: Evaluate the model End of explanation """
gevero/py_matrix
py_matrix/examples/Gold Circular Magnetic Dichroism.ipynb
gpl-3.0
# libraries import numpy as np # numpy import scipy as sp # scipy import scipy.constants as sp_c # scientific constants import sys # sys to add py_matrix to the path # matplotlib inline plots import matplotlib.pylab as plt %matplotlib inline # adding py_matrix parent folder to python path sys.path.append('../../') import py_matrix as pm # importing py_matrix # useful parameters f_size=20; h_bar=sp_c.hbar/sp_c.eV # h_bar in eV """ Explanation: # Gold Nanoparticles Circular Magnetic Dichroism Example (CMD) In this notebook we calculate the MCD signal for gold nanoparticles following two different approaches. The first is a scalar approache, as in: Pineider, F.; Campo, G.; Bonanni, V.; Fernández, C. de J.; Mattei, G.; Caneschi, A.; Gatteschi, D.; Sangregorio, C. Circular Magnetoplasmonic Modes in Gold Nanoparticles. Nano Lett. 2013, 13, 4785–4789. the second is the full tensorial Transfer Matrix approach, as in: Maccaferri, N.; González-Díaz, J. B.; Bonetti, S.; Berger, A.; Kataja, M.; van Dijken, S.; Nogués, J.; Bonanni, V.; Pirzadeh, Z.; Dmitriev, A.; et al. Polarizability and Magnetoplasmonic Properties of Magnetic General Nanoellipsoids. Opt. Express 2013, 21, 9875–9889. 
The notebook is structured as follows: - Setup of useful settings and import of necessary libraries - Setup of scalar MCD functions - Inputs for the simulation - Computation - Plot Settings and libraries End of explanation """ # size corrected damping constant for plasma oscillation def gamma_r(gamma_inf,v_f,r): return gamma_inf + h_bar*v_f/(r*1e-9) # coupling function to external magnetic field def f_w(wl,w_p,gamma): w=1240/wl # nm to eV f_out=-h_bar*(sp_c.e/sp_c.m_e)*(w_p**2/w)*((gamma-1j*w)*(gamma-1j*w))/((gamma**2+w**2)**2) return f_out # magnetic field dependent polarizability def alpha_mattei(wl,eps,eps_m,f_w,f_m,D,B): norm=-np.pi*(D**3)/2.0 num=eps-eps_m+(f_w-f_m)*B den=eps+2.0*eps_m+(f_w-f_m)*B alpha=norm*num/den return alpha # scattering cross section def sigma_mattei(wl,eps_m,alpha): return (2.0*np.pi/wl)*np.sqrt(eps_m)*np.imag(alpha) """ Explanation: MCD functions as found in : Pineider, F.; Campo, G.; Bonanni, V.; Fernández, C. de J.; Mattei, G.; Caneschi, A.; Gatteschi, D.; Sangregorio, C. Circular Magnetoplasmonic Modes in Gold Nanoparticles. Nano Lett. 2013, 13, 4785–4789. We are going to compare our t-matrix calculations with the ones contained in the above paper, where the approach is simply to calculate a modified polarizability for left and right circularly polarized lighe taking into account the effect of the B field as a Lorentz force. 
All the functions defined below are found in the paper and in the Supporting Informations End of explanation """ # building the optical constant database, point the folder below to the "materials" py_matrix folder eps_db_out=pm.mat.generate_eps_db('../materials/',ext='*.edb') eps_files,eps_names,eps_db=eps_db_out['eps_files'],eps_db_out['eps_names'],eps_db_out['eps_db'] # multilayer and computation inputs D=13.0; # nanocluster diameter f=0.01; # filling factor for the effective medium containing the Au nanoclusters stack=['e_hexane','e_au','e_hexane'] # materials composing the stack, as taken from eps_db d_list=[0.0,D,0.0] # multilayer thicknesses: incident medium and substrate have zero thickness # wavelength wl_min=400 wl_max=800 wl_step=100; v_wl=np.linspace(wl_min,wl_max,wl_step); # polar and azimuthal angles in radians theta_0=0.0 phi_0=0.0 # drude metal parameters w_p=8.95 # plasma frequency gamma=0.02 # damping constant f_m=1.06e-6 # medium magnetic coupling v_f=1.4e6; # Fermi velocity gamma_inf=0.069; # bulk damping constant # B field in Tesla B=1.0 """ Explanation: Inputs End of explanation """ # Scalar MCD calculations following Nano Lett. 
2013, 13, 4785–4789 v_sigma_plus=np.zeros_like(v_wl) v_sigma_minus=np.zeros_like(v_wl) v_sigma_zero=np.zeros_like(v_wl) for i_wl,wl in enumerate(v_wl): # Magnetic field corrections gamma_r0=gamma_r(gamma_inf,v_f,D/2) f_w0=f_w(wl,w_p,gamma_r0) # optical constants eps=pm.mat.db_to_eps(wl,eps_db,['e_au'])[0]; e_corr=pm.mat.eps_corr_drude(wl,w_p,gamma_inf,v_f,D/2) # mean free path correction eps=eps+e_corr; eps=np.real(eps)-1j*np.imag(eps); # flipping imaginary part size due to different conventions eps_m=np.real(pm.mat.db_to_eps(wl,eps_db,['e_hexane'])[0]) # polarizability and alpha=alpha_mattei(wl,eps,eps_m,f_w0,f_m,D,B);v_sigma_plus[i_wl]=sigma_mattei(wl,eps_m,alpha) # Left pol alpha=alpha_mattei(wl,eps,eps_m,f_w0,f_m,D,-B);v_sigma_minus[i_wl]=sigma_mattei(wl,eps_m,alpha) # Right pol alpha=alpha_mattei(wl,eps,eps_m,f_w0,f_m,D,0.0);v_sigma_zero[i_wl]=sigma_mattei(wl,eps_m,alpha) # B=0 v_mcd=(v_sigma_minus-v_sigma_plus)/v_sigma_zero.max() """ Explanation: Scalar MCD calculations following Nano Lett. 
2013, 13, 4785–4789 End of explanation """ # polarization correction factors for gold nanoclusters a_x=D/2;a_y=D/2;a_z=D/2; m_L=pm.moe.m_L(a_x,a_y,a_z); m_D=pm.moe.m_D(a_x,a_y,a_z); V=pm.moe.f_V(a_x,a_y,a_z) # Full transfer matrix calculations v_A_r=np.zeros_like(v_wl);v_A_l=np.zeros_like(v_wl); v_A_p=np.zeros_like(v_wl);v_A_s=np.zeros_like(v_wl); m_eps=np.zeros((len(stack),3,3),dtype=np.complex128) for i_wl,wl in enumerate(v_wl): # retrieving optical constants at wl from the database e_list=pm.mat.db_to_eps(wl,eps_db,stack); e_corr=pm.mat.eps_corr_drude(wl,w_p,gamma_inf,v_f,D/2) #print e_corr e_list[1]=e_list[1]+e_corr e_xy=pm.mat.eps_xy_drude(wl, w_p, gamma_r0, B) # calculating the MG effective medium em=e_list[0] m_e1=np.identity(3)*em; m_e2=np.identity(3)*e_list[1]; m_e2[1,0]=e_xy;m_e2[0,1]=-e_xy; m_eff=pm.mat.m_eff_MG(m_L,m_D,V,m_e1,m_e2,wl,f) # filling dielectric tensor m_eps[:,0,0]=e_list m_eps[:,1,1]=e_list m_eps[:,2,2]=e_list m_eps[1,:,:]=m_eff # computing transmission matrix m_t_ps=pm.core.rt(wl,theta_0,phi_0,m_eps,d_list)['m_t_ps'] # extracting the mcd information from the transmission matrix n_0=np.sqrt(e_list[0]);n_s=np.sqrt(e_list[-1]); out=pm.utils.T_ps_rl(m_t_ps,theta_0,n_0,n_s) v_A_r[i_wl]=out['A_r'] v_A_l[i_wl]=out['A_l'] v_A_p[i_wl]=out['A_p'] v_A_s[i_wl]=out['A_s'] """ Explanation: Full Transfer Matrix Calculations The Maxwell-Garnett effective medium for the layer containing the gold nanoclusters is calculating following: Maccaferri, N.; González-Díaz, J. B.; Bonetti, S.; Berger, A.; Kataja, M.; van Dijken, S.; Nogués, J.; Bonanni, V.; Pirzadeh, Z.; Dmitriev, A.; et al. Polarizability and Magnetoplasmonic Properties of Magnetic General Nanoellipsoids. Opt. Express 2013, 21, 9875–9889. 
End of explanation """ # cmd plot fg2=plt.figure(num=2,figsize=(15,10)) plt.plot(v_wl,1e4*(v_A_r-v_A_l)/(v_A_r).max(),'r-', v_wl,1e4*v_mcd,'ko', markersize=10,linewidth=3); #-----ticks------ fsize=15; plt.xticks(fontsize=fsize+10);plt.yticks(fontsize=fsize+10); #------axis labels------ plt.xlabel(r'Wavelength (nm)',fontsize=fsize+15); plt.ylabel(r'$\Delta$A\(A$_{\mathrm{max}}*$B)(T$^{-1}*10^{4}$)',fontsize=fsize+15); #------plot legend------ plt.legend(('Tensorial','Scalar'), fontsize=fsize+15,loc='upper right',frameon=False); """ Explanation: Plot End of explanation """
matthewzimmer/traffic-sign-classification
Traffic_Signs_Recognition-WIP.ipynb
mit
import hashlib
import os
import pickle
from urllib.request import urlretrieve

import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile

import math
import tensorflow as tf
import matplotlib.pyplot as plt
# fixed: plot_images below uses gridspec and random, which were never imported
# (duplicate numpy/tqdm imports were also removed)
from matplotlib import gridspec
import random

training_file = 'traffic-sign-data/train.p'
testing_file = 'traffic-sign-data/test.p'

with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)

train_features, train_labels, train_size, train_coords = \
    train['features'], train['labels'], train['sizes'], train['coords']
test_features, test_labels, test_size, test_coords = \
    test['features'], test['labels'], test['sizes'], test['coords']

assert len(train_features) == len(train_labels), 'features must be same size as labels'

# Reshape train features
# NOTE(review): np.arange REPLACES the loaded pixel data with synthetic ramp
# values -- presumably a WIP placeholder; confirm before training on this.
train_features = np.arange(len(train_features)*1024).reshape((len(train_features), 1024))
test_features = np.arange(len(test_features)*1024).reshape((len(test_features), 1024))

assert len(train_features) == len(train_labels), 'features must be same size as labels'
""" Explanation: Self-Driving Car Engineer Nanodegree
Deep Learning
Project: Build a Traffic Sign Recognition Classifier
In this notebook, a template is provided for you to implement your functionality in stages which is
required to successfully complete this project. If additional code is required that cannot be included
in the notebook, be sure that the Python code is successfully imported and included in your submission,
if necessary. Sections that begin with 'Implementation' in the header indicate where you should begin
your implementation for your project. Note that some sections of implementation are optional, and will
be marked with 'Optional' in the header.
In addition to implementing code, there will be questions that you must answer which relate to the
project and your implementation. Each section where you will answer a question is preceded by a
'Question' header. Carefully read each question and provide thorough answers in the following text
boxes that begin with 'Answer:'. Your project submission will be evaluated based on your answers to
each of the questions and the implementation you provide.
Note: Code and Markdown cells can be executed using the Shift + Enter keyboard shortcut. In addition,
Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
Step 1: Dataset Exploration
Visualize the German Traffic Signs Dataset. This is open ended, some suggestions include: plotting
traffic signs images, plotting the count of each sign, etc. Be creative!
The pickled data is a dictionary with 4 key/value pairs:
features -> the images pixel values, (width, height, channels)
labels -> the label of the traffic sign
sizes -> the original width and height of the image, (width, height)
coords -> coordinates of a bounding box around the sign in the image, (x1, y1, x2, y2).
Based the original image (not the resized version).
End of explanation """
_epochs_completed = 0
_index_in_epoch = 0
_num_examples = len(train_features)
""" Explanation: Globals End of explanation """
""" Helper-function for flattening a layer
A convolutional layer produces an output tensor with 4 dimensions. We will add fully-connected
layers after the convolution layers, so we need to reduce the 4-dim tensor to 2-dim which can
be used as input to the fully-connected layer.
"""
def flatten_layer(layer):
    """Flatten a 4-dim conv output to [num_images, num_features]."""
    # Get the shape of the input layer.
    layer_shape = layer.get_shape()
    # The shape of the input layer is assumed to be:
    # layer_shape == [num_images, img_height, img_width, num_channels]
    # The number of features is: img_height * img_width * num_channels
    # We can use a function from TensorFlow to calculate this.
    num_features = layer_shape[1:4].num_elements()
    # Reshape the layer to [num_images, num_features].
    # Note that we just set the size of the second dimension
    # to num_features and the size of the first dimension to -1
    # which means the size in that dimension is calculated
    # so the total size of the tensor is unchanged from the reshaping.
    layer_flat = tf.reshape(layer, [-1, num_features])
    # The shape of the flattened layer is now:
    # [num_images, img_height * img_width * num_channels]
    # Return both the flattened layer and the number of features.
    return layer_flat, num_features


# Problem 1 - Implement Min-Max scaling for greyscale image data
def normalize_greyscale(image_data):
    """
    Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
    :param image_data: The image data to be normalized
    :return: Normalized image data
    """
    a = 0.1
    b = 0.9
    x_min = np.min(image_data)
    x_max = np.max(image_data)
    # vectorized: same values as the original per-element list comprehension,
    # computed in one numpy expression instead of a Python loop
    return a + (np.asarray(image_data) - x_min) * (b - a) / (x_max - x_min)


def plot_images(X_dataset, labels, sample_size=10):
    """Plot sample_size randomly chosen images for each class in a grid."""
    count = 0  # book keeping for plots
    n_labels = len(labels)
    fig = plt.figure(figsize=(sample_size, n_labels))
    grid = gridspec.GridSpec(n_labels, sample_size, wspace=0.0, hspace=0.0)
    labelset_pbar = tqdm(range(n_labels), desc='Sample test images', unit='labels')
    for i in labelset_pbar:
        ind = labels == i
        subset_x = X_dataset[ind,]  # get all images that belong to class i
        for x in range(sample_size):
            img = random.choice(subset_x)  # randomly pick one image from class i
            ax = plt.Subplot(fig, grid[count])
            ax.set_xticks([])
            ax.set_yticks([])
            ax.imshow(img, cmap='gray')
            fig.add_subplot(ax)
            count += 1
        # hide the borders
        if i == (n_labels-1):
            all_axes = fig.get_axes()
            for ax in all_axes:
                for sp in ax.spines.values():
                    sp.set_visible(False)

# [Adapted from Lesson 7 - MiniFlow]
# Turn labels into numbers and apply One-Hot Encoding
train_features = normalize_greyscale(train_features)
test_features = normalize_greyscale(test_features)
num_channels = 1
""" Explanation: Convert to greyscale since intuitively we know there are no two traffic signs
with the same design differentiated by color
End of explanation """
# NOTE(review): X_train/y_train/X_test/y_test are not defined anywhere in this
# WIP notebook -- presumably a train/test split cell is missing; confirm.
# number of training examples
n_train = len(X_train)
# number of testing examples
n_test = len(X_test)
# Tuple with height and width of images used to reshape arrays.
image_shape = X_train.shape[1:3]
# how many classes are in the dataset
n_classes = len(np.unique(y_train))
img_size = image_shape[0]
# Images are stored in one-dimensional arrays of this length.
img_size_flat = img_size * img_size
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("img_size_flat:", img_size_flat)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
""" Explanation: Data Dimensions End of explanation """


def plot_images1(images, cls_true, cls_pred=None):
    """Plot nine images in a 3x3 grid with the true (and optionally the
    predicted) class written below each image."""
    # assert len(images) == len(cls_true)
    # Create figure with 3x3 sub-plots.
    fig, axes = plt.subplots(3, 3)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)
    for i, ax in enumerate(axes.flat):
        # Plot image.
        ax.imshow(images[i], cmap='binary')
        # Show true and predicted classes.
        if cls_pred is None:
            xlabel = "True: {0}".format(cls_true[i])
        else:
            xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])
        # Show the classes as the label on the x-axis.
        ax.set_xlabel(xlabel)
        # Remove ticks from the plot.
        ax.set_xticks([])
        ax.set_yticks([])
    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()

""" Explanation: Helper-function for plotting images
Function used to plot 9 images in a 3x3 grid, and writing the true and predicted classes
below each image.
End of explanation """
### Data exploration visualization goes here.
### Feel free to use as many code cells as needed.
# Get the first images from the test-set.
images = X_test[0:9]
# Get the true classes for those images.
y_true = y_test[0:9]
# Plot the images and labels using our helper-function above.
# fixed: the original called plot_images(images=..., cls_true=...), which does
# not match plot_images' signature; plot_images1 is the intended helper here.
plot_images1(images=images, cls_true=y_true)
""" Explanation: Data Exploration & Visualization End of explanation """
### Preprocess the data here.
### Feel free to use as many code cells as needed.
""" Explanation: Step 2: Design and Test a Model Architecture
Design and implement a deep learning model that learns to recognize traffic signs. Train and test
your model on the German Traffic Sign Dataset.
There are various aspects to consider when thinking about this problem:
Your model can be derived from a deep feedforward net or a deep convolutional network.
Play around preprocessing techniques (normalization, rgb to grayscale, etc)
Number of examples per label (some have more than others).
Generate fake data.
Here is an example of a published baseline model on this problem. It's not required to be familiar
with the approach used in the paper but, it's good practice to try to read papers like these.
Implementation
Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
Once you have completed your implementation and are satisfied with the results, be sure to thoroughly
answer the questions that follow.
End of explanation """
### Generate data additional (if you want to!)
### and split the data into training/validation/testing sets here.
### Feel free to use as many code cells as needed.
""" Explanation: Question 1
Describe the techniques used to preprocess the data.
Answer:
End of explanation """
### Define your architecture here.
### Feel free to use as many code cells as needed.
""" Explanation: Question 2
Describe how you set up the training, validation and testing data for your model. If you generated
additional data, why?
Answer:
End of explanation """
### Train your model here.
### Feel free to use as many code cells as needed.
""" Explanation: Question 3
What does your final architecture look like? (Type of model, layers, sizes, connectivity, etc.)
For reference on how to build a deep neural network using TensorFlow, see Deep Neural Network in
TensorFlow from the classroom.
Answer:
End of explanation """
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
""" Explanation: Question 4
How did you train your model? (Type of optimizer, batch size, epochs, hyperparameters, etc.)
Answer:
Question 5
What approach did you take in coming up with a solution to this problem?
Answer:
Step 3: Test a Model on New Images
Take several pictures of traffic signs that you find on the web or around you (at least five), and
run them through your classifier on your computer to produce example results. The classifier might
not recognize some local signs but it could prove interesting nonetheless.
You may find signnames.csv useful as it contains mappings from the class id (integer) to the actual
sign name.
Implementation
Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
Once you have completed your implementation and are satisfied with the results, be sure to thoroughly
answer the questions that follow.
End of explanation """
### Run the predictions here.
### Feel free to use as many code cells as needed.
""" Explanation: Question 6
Choose five candidate images of traffic signs and provide them in the report. Are there any particular
qualities of the image(s) that might make classification difficult? It would be helpful to plot the
images in the notebook.
Answer:
End of explanation """
### Visualize the softmax probabilities here.
### Feel free to use as many code cells as needed.
""" Explanation: Question 7
Is your model able to perform equally well on captured pictures or a live camera stream when compared
to testing on the dataset?
Answer:
End of explanation """
gregorjerse/rt2
2015_2016/lab13/Extending values on vertices.ipynb
gpl-3.0
from itertools import combinations, chain

def simplex_closure(a):
    """Return a generator over all nonempty subsimplices (faces) of the
    simplex a, of every dimension. The simplex a itself is also included.
    """
    # The faces of a are exactly the combinations of its vertices of
    # length 1 .. len(a).
    return chain.from_iterable([combinations(a, l) for l in range(1, len(a) + 1)])

def closure(K):
    """Add all missing subsimplices to K in order to make it a simplicial complex."""
    # A set comprehension removes the duplicate faces shared by simplices.
    return list({s for a in K for s in simplex_closure(a)})

def contained(a, b):
    """Return True if a is a subsimplex of b, False otherwise."""
    return all(v in b for v in a)

def star(s, cx):
    """Return the set of all simplices in the complex cx that contain the simplex s."""
    return {p for p in cx if contained(s, p)}

def intersection(s1, s2):
    """Return the common vertices of the simplices s1 and s2 as a list."""
    return list(set(s1).intersection(s2))

def link(s, cx):
    """Return the link of the simplex s in the complex cx."""
    # The link consists of all simplices from the closed star that have
    # empty intersection with s.
    return [c for c in closure(star(s, cx)) if not intersection(s, c)]

def simplex_value(s, f, aggregate):
    """Return the values of f on the vertices of s, aggregated by the
    aggregate function (e.g. min or max).
    """
    return aggregate([f[v] for v in s])

def lower_link(s, cx, f):
    """Return the lower link of the simplex s in the complex cx.

    The dictionary f maps vertices (integers) to the values on vertices.
    The lower link consists of the simplices of the link all of whose
    vertex values lie strictly below the smallest value attained on s.
    """
    sval = simplex_value(s, f, min)
    # Fixed: the original comprehension reused the name 's', shadowing the
    # parameter; a distinct loop variable keeps the intent clear.
    return [c for c in link(s, cx) if simplex_value(c, f, max) < sval]
""" Explanation: Extending values on vertices to a discrete gradient vector field During extension algorithm one has to compute lover_link for every vertex in the complex. So let us implement search for the lower link first. It requires quite a lot of code: first we find a star, then link and finally lower link for the given simplex.
End of explanation """
K = closure([(1, 2, 3)])
f = {1: 0, 2: 1, 3: 2}
for v in (1, 2, 3):
    # Fixed: the original read 'print"..."' (a Python 2 print statement,
    # a syntax error in Python 3); print is a function here.
    print("{0}: {1}".format((v,), lower_link((v,), K, f)))
""" Explanation: Let us test the above function on the simple example: full triangle with values 0, 1 and 2 on the vertices labeled with 1, 2 and 3. End of explanation """
def join(a, b):
    """Return the join of 2 simplices a and b."""
    return tuple(sorted(set(a).union(b)))

def extend(K, f):
    """Extend the vertex values to a discrete gradient vector field on K.

    The function on vertices is given in f (vertex -> value). Returns the
    pair V, C, where V is the dictionary containing the discrete gradient
    vector field (simplex -> paired cofacet) and C is the list of all
    critical cells.
    """
    V = dict()
    C = []
    for v in (s for s in K if len(s) == 1):
        ll = lower_link(v, K, f)
        # Fixed: the original condition read 'if len(ll) b== 0:' -- the
        # stray 'b' was a typo for the plain equality test.
        if len(ll) == 0:
            # A vertex with an empty lower link is a local minimum of f,
            # hence a critical cell.
            C.append(v)
        else:
            # Recursively build the field on the lower link, then cone it
            # with v: pair v with the join over the minimal critical vertex.
            V1, C1 = extend(ll, f)
            mv, mc = min([(f[c[0]], c) for c in C1 if len(c) == 1])
            V[v] = join(v, mc)
            # Every other critical cell of the lower link yields a critical
            # cell of the cone; every pairing is coned with v as well.
            for c in (c for c in C1 if c != mc):
                C.append(join(v, c))
            for a, b in V1.items():
                V[join(a, v)] = join(b, v)
    return V, C
""" Explanation: Now let us implement an extension algorithm. We are leaving out the cancelling step for clarity. End of explanation """
K = closure([(1, 2, 3)])
f = {1: 0, 2: 1, 3: 2}
extend(K, f)
K = closure([(1, 2, 3), (2, 3, 4)])
f = {1: 0, 2: 1, 3: 2, 4: 0}
extend(K, f)
K = closure([(1, 2, 3), (2, 3, 4)])
f = {1: 0, 2: 1, 3: 2, 4: 3}
extend(K, f)
""" Explanation: Let us test the algorithm on the example from the previous step (full triangle). End of explanation """
statsmodels/statsmodels.github.io
v0.13.1/examples/notebooks/generated/pca_fertility_factors.ipynb
bsd-3-clause
# IPython magic: render Matplotlib figures inline in the notebook.
%matplotlib inline
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.multivariate.pca import PCA

# Notebook-wide Matplotlib defaults: large figures and readable fonts.
plt.rc("figure", figsize=(16, 8))
plt.rc("font", size=14)
""" Explanation: statsmodels Principal Component Analysis Key ideas: Principal component analysis, world bank data, fertility In this notebook, we use principal components analysis (PCA) to analyze the time series of fertility rates in 192 countries, using data obtained from the World Bank. The main goal is to understand how the trends in fertility over time differ from country to country. This is a slightly atypical illustration of PCA because the data are time series. Methods such as functional PCA have been developed for this setting, but since the fertility data are very smooth, there is no real disadvantage to using standard PCA in this case. End of explanation """
# Load the fertility dataset bundled with statsmodels; the trailing
# data.head() displays the first rows in the notebook output.
data = sm.datasets.fertility.load_pandas().data
data.head()
""" Explanation: The data can be obtained from the World Bank web site, but here we work with a slightly cleaned-up version of the data: End of explanation """
# Keep only the yearly rate columns (1960-2011, column names are strings),
# index the frame by country name, and drop countries with any missing year.
columns = list(map(str, range(1960, 2012)))
data.set_index("Country Name", inplace=True)
dta = data[columns]
dta = dta.dropna()
dta.head()
""" Explanation: Here we construct a DataFrame that contains only the numerical fertility rate data and set the index to the country names. We also drop all the countries with any missing data. End of explanation """
# Plot the mean fertility trend over time (each country weighted equally).
ax = dta.mean().plot(grid=False)
ax.set_xlabel("Year", size=17)
ax.set_ylabel("Fertility rate", size=17)
ax.set_xlim(0, 51)
""" Explanation: There are two ways to use PCA to analyze a rectangular matrix: we can treat the rows as the "objects" and the columns as the "variables", or vice-versa. Here we will treat the fertility measures as "variables" used to measure the countries as "objects".
Thus the goal will be to reduce the yearly fertility rate values to a small number of fertility rate "profiles" or "basis functions" that capture most of the variation over time in the different countries. The mean trend is removed in PCA, but its worthwhile taking a look at it. It shows that fertility has dropped steadily over the time period covered in this dataset. Note that the mean is calculated using a country as the unit of analysis, ignoring population size. This is also true for the PC analysis conducted below. A more sophisticated analysis might weight the countries, say by population in 1980. End of explanation """
# Fit the PCA on the transposed data: the series are demeaned but not
# standardized.
pca_model = PCA(dta.T, standardize=False, demean=True)
""" Explanation: Next we perform the PCA: End of explanation """
# Scree plot of the eigenvalues, to judge how many components matter.
fig = pca_model.plot_scree(log_scale=False)
""" Explanation: Based on the eigenvalues, we see that the first PC dominates, with perhaps a small amount of meaningful variation captured in the second and third PC's. End of explanation """
# Plot the first three PC factors over time.
fig, ax = plt.subplots(figsize=(8, 4))
lines = ax.plot(pca_model.factors.iloc[:, :3], lw=4, alpha=0.6)
# NOTE(review): set_xticklabels without a matching set_xticks relies on the
# default tick positions lining up with every 10th column -- verify labels.
ax.set_xticklabels(dta.columns.values[::10])
ax.set_xlim(0, 51)
ax.set_xlabel("Year", size=17)
fig.subplots_adjust(0.1, 0.1, 0.85, 0.9)
legend = fig.legend(lines, ["PC 1", "PC 2", "PC 3"], loc="center right")
legend.draw_frame(False)
""" Explanation: Next we will plot the PC factors. The dominant factor is monotonically increasing. Countries with a positive score on the first factor will increase faster (or decrease slower) compared to the mean shown above. Countries with a negative score on the first factor will decrease faster than the mean. The second factor is U-shaped with a positive peak at around 1985. Countries with a large positive score on the second factor will have lower than average fertilities at the beginning and end of the data range, but higher than average fertility in the middle of the range.
End of explanation """
# Rank countries by their loading on the first principal component.
idx = pca_model.loadings.iloc[:, 0].argsort()
""" Explanation: To better understand what is going on, we will plot the fertility trajectories for sets of countries with similar PC scores. The following convenience function produces such a plot. End of explanation """
def make_plot(labels):
    """Plot the fertility trajectories of the given countries together with the cross-country mean curve."""
    fig, ax = plt.subplots(figsize=(9, 5))
    ax = dta.loc[labels].T.plot(legend=False, grid=False, ax=ax)
    dta.mean().plot(ax=ax, grid=False, label="Mean")
    ax.set_xlim(0, 51)
    # Leave room on the right for the legend placed outside the axes.
    fig.subplots_adjust(0.1, 0.1, 0.75, 0.9)
    ax.set_xlabel("Year", size=17)
    ax.set_ylabel("Fertility", size=17)
    legend = ax.legend(
        *ax.get_legend_handles_labels(), loc="center left", bbox_to_anchor=(1, 0.5)
    )
    legend.draw_frame(False)

# The five countries with the largest loadings on PC 1.
labels = dta.index[idx[-5:]]
make_plot(labels)
""" Explanation: First we plot the five countries with the greatest scores on PC 1. These countries have a higher rate of fertility increase than the global mean (which is decreasing). End of explanation """
# Re-rank countries by their loading on the second principal component.
idx = pca_model.loadings.iloc[:, 1].argsort()
make_plot(dta.index[idx[-5:]])
""" Explanation: Here are the five countries with the greatest scores on factor 2. These are countries that reached peak fertility around 1980, later than much of the rest of the world, followed by a rapid decrease in fertility. End of explanation """
# The five countries with the most negative loadings on PC 2.
make_plot(dta.index[idx[:5]])
""" Explanation: Finally we have the countries with the most negative scores on PC 2. These are the countries where the fertility rate declined much faster than the global mean during the 1960's and 1970's, then flattened out. End of explanation """
# Scatter the loadings on the first two components against each other, and
# list the countries with a PC 2 loading above 0.2.
fig, ax = plt.subplots()
pca_model.loadings.plot.scatter(x="comp_00", y="comp_01", ax=ax)
ax.set_xlabel("PC 1", size=17)
ax.set_ylabel("PC 2", size=17)
dta.index[pca_model.loadings.iloc[:, 1] > 0.2].values
""" Explanation: We can also look at a scatterplot of the first two principal component scores.
We see that the variation among countries is fairly continuous, except perhaps that the two countries with highest scores for PC 2 are somewhat separated from the other points. These countries, Oman and Yemen, are unique in having a sharp spike in fertility around 1980. No other country has such a spike. In contrast, the countries with high scores on PC 1 (that have continuously increasing fertility), are part of a continuum of variation. End of explanation """
rickiepark/tfk-notebooks
tensorflow_for_beginners/5. Fully Connected Neural Network.ipynb
mit
from tensorflow.examples.tutorials.mnist import input_data

# Download (if needed) and load MNIST into train/validation/test splits;
# one_hot=True returns the labels as one-hot vectors.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
""" Explanation: 텐서플로우 라이브러리를 임포트 하세요. 텐서플로우에는 MNIST 데이터를 자동으로 로딩해 주는 헬퍼 함수가 있습니다. "MNIST_data" 폴더에 데이터를 다운로드하고 훈련, 검증, 테스트 데이터를 자동으로 읽어 들입니다. one_hot 옵션을 설정하면 정답 레이블을 원핫벡터로 바꾸어 줍니다. End of explanation """
# Exercise cell: replace the '..' placeholders with an image index and the
# 28x28 image shape. NOTE(review): assumes plt (matplotlib.pyplot) was
# imported earlier in the notebook by the student.
plt.imshow(mnist.train.images[..].reshape([.., ..]), cmap=plt.get_cmap('gray_r'))
""" Explanation: minist.train.images에는 훈련용 이미지 데이터가 있고 mnist.test.images에는 테스트용 이미지 데이터가 있습니다. 이 데이터의 크기를 확인해 보세요. matplotlib에는 이미지를 그려주는 imshow() 함수가 있습니다. 우리가 읽어 들인 mnist.train.images는 길이 784의 배열입니다. 55,000개 중에서 원하는 하나를 출력해 보세요. 이미지로 표현하려면 원본 이미지 사각형 크기인 [28, 28]로 변경해 줍니다. 그리고 흑백이미지 이므로 컬러맵을 그레이 스케일로 지정합니다. End of explanation """
# Exercise cell: replace '..' with an index to inspect one one-hot label.
mnist.train.labels[..]
""" Explanation: mnist.train.labels에는 정답값 y 가 들어 있습니다. 원핫벡터로 로드되었는지 55,000개의 정답 데이터 중 하나를 확인해 보세요. End of explanation """
# Create the session and initialize the model variables. NOTE(review):
# assumes tf (tensorflow) and the model graph were defined in earlier cells
# as described in the explanation text above.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
""" Explanation: 훈련 데이터는 55,000개로 한꺼번에 처리하기에 너무 많습니다. 그래서 미니배치 그래디언트 디센트 방식을 사용하려고 합니다. 미니배치 방식을 사용하려면 훈련 데이터에서 일부를 쪼개어 반복하여 텐서플로우 모델에 주입해 주어야 합니다. 텐서플로우 모델이 동작하면서 입력 데이터를 받기위해 플레이스 홀더를 정의합니다. 플레이스 홀더는 x(이미지), y(정답 레이블) 두가지입니다. x = tf.placeholder("float32", [None, 784]) y = tf.placeholder("float32", shape=[None, 10]) 첫번째 레이어의 행렬식을 만듭니다. 이 식은 입력 데이터 x와 첫번째 레이어의 가중치 W1을 곱하고 편향 b1을 더합니다. 첫번째 레이어의 뉴런(유닛) 개수를 100개로 지정하겠습니다. 입력 데이터 x 는 [None, 784] 사이즈의 플레이스 홀더이므로 가중치의 크기는 [784, 100] 이 되어야 결과 행렬이 [None, 100] 이 되어 다음 레이어로 전달됩니다. W1 = tf.Variable(tf.truncated_normal([784, 100], stddev=0.1)) b1 = tf.Variable(tf.constant(0.1, shape=[100])) tf.matmul 함수를 사용하여 행렬곱을 한다음 편향을 더하고 첫번째 레이어의 활성화 함수인 시그모이드 함수를 적용합니다. 텐서플로우에는 시그모이드 함수를 내장하고 있습니다. t = tf.sigmoid(tf.matmul(x,W1) + b1) 출력 레이어의 계산식을 만들기 위해 가중치 W2와 b2 변수를 만듭니다. 직전의 히든 레이어의 출력 사이즈가 [None, 100]이고 출력 유닛의 개수는 10개 이므로 가중치 W2의 크기는 [100, 10] 이 됩니다. 편향 b2의 크기는 [10]입니다. W2 = tf.Variable(tf.truncated_normal([100, 10], stddev=0.1)) b2 = tf.Variable(tf.constant(0.1, shape=[10])) 출력 레이어의 행렬곱을 계산합니다.
이전 히든 레이어의 출력 t와 W2를 곱하고 b2를 더합니다. z = tf.matmul(t,W2) + b2 출력 값을 정규화하여 정답과 비교하려면 소프트맥스 함수를 적용해야 합니다. 텐서플로우에는 소프트맥스 함수가 내장되어 있습니다. y_hat = tf.nn.softmax(z) 손실 함수 크로스 엔트로피를 계산하기 위해 위에서 구한 y_hat을 사용해도 되지만 텐서플로우에는 소프트맥스를 통과하기 전의 값 z 를 이용하여 소프트맥스 크로스 엔트로피를 계산해 주는 함수를 내장하고 있습니다. softmax_cross_entropy를 이용하여 z 와 정답 y 의 손실을 계산합니다. loss = tf.losses.softmax_cross_entropy(y, z) 학습속도 0.5로 경사하강법을 적용하고 위에서 만든 손실 함수를 이용해 훈련 노드를 생성합니다. optimizer = tf.train.GradientDescentOptimizer(0.5) train = optimizer.minimize(loss) 올바르게 분류된 정확도를 계산하려면 정답을 가지고 있는 원핫벡터인 y 와 소프트맥스를 통과한 원핫벡터인 y_hat을 비교해야 합니다. 이 두 텐서는 [None, 10]의 크기를 가지고 있습니다. 따라서 행방향(1)으로 가장 큰 값을 가진 인덱스(argmax)를 찾아서 같은지(equal) 확인하면 됩니다. correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_hat,1)) correct_prediction은 [True, False, ...] 와 같은 배열이므로 불리언을 숫자(1,0)로 바꾼다음(cast) 전체를 합하여 평균을 내면 정확도 값을 얻을 수 있습니다. accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 세션 객체를 만들고 모델에 사용할 변수를 초기화합니다. End of explanation """
# Mini-batch training: 1000 iterations, 100 examples per batch; keep the
# loss value of every step in costs so the learning curve can be plotted.
# NOTE(review): train, loss, x and y are the graph nodes / placeholders
# built in earlier exercise cells.
costs = []
for i in range(1000):
    x_data, y_data = mnist.train.next_batch(100)
    _, cost = sess.run([train, loss], feed_dict={x: x_data, y: y_data})
    costs.append(cost)
""" Explanation: 1000번 반복을 하면서 훈련 데이터에서 100개씩 뽑아내어(mnist.train.next_batch) 모델에 주입합니다. 모델의 플레이스 홀더에 주입하려면 플레이스 홀더의 이름과 넘겨줄 값을 딕셔너리 형태로 묶어서 feed_dict 매개변수에 전달합니다. 계산할 값은 훈련 노드 train 과 학습 과정을 그래프로 출력하기 위해 손실함수 값을 계산하여 costs 리스트에 누적합니다. End of explanation """
# Plot the collected loss values (the learning curve).
plt.plot(costs)
""" Explanation: costs 리스트를 그래프로 출력합니다. End of explanation """
# Show the first five test images together with the predicted class
# (argmax over the softmax output y_hat).
for i in range(5):
    plt.imshow(mnist.test.images[i].reshape([28, 28]), cmap=plt.get_cmap('gray_r'))
    plt.show()
    print(sess.run(tf.argmax(y_hat,1), feed_dict={x: mnist.test.images[i].reshape([1,784])}))
""" Explanation: 정확도를 계산하기 위해 만든 노드 accuracy를 실행합니다. 이때 입력 데이터는 mnist.test 로 훈련시에 사용하지 않았던 데이터입니다. 이 정확도 계산은 위에서 학습시킨 W1, b1, W2, b2 를 이용하여 레이블을 예측한 결과입니다. sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}) 실제 이미지와 예측 값이 동일한지 확인하기 위해 테스트 데이터 앞의 5개 이미지와 예측 값을 차례대로 출력해 봅니다. End of explanation """
squishbug/DataScienceProgramming
11-Similarity-Based-Learning/SimilarityBased.ipynb
cc0-1.0
import numpy as np import math as ma import matplotlib.pyplot as plt %matplotlib inline X = np.array([3.3, 1.2]) Y = np.array([2.1, -1.8]) plt.arrow(0,0,*X, head_width=0.2); plt.arrow(0,0,*Y, head_width=0.2); plt.xlim([0, 4]); plt.ylim([-2,2]); plt.show(); # Euclidean distance manually: ma.sqrt(np.sum((X-Y)**2)) # numpy norm: np.linalg.norm(X-Y) """ Explanation: Similarity-based Learning Similiarity-based approaches in machine learning come from the idea that the best way to make predictions is simply to look at what has worked in the past and predict the same thing again. The fundamental concepts required to build a system based on this idea are feature spaces and measures of similarity. Reading Chapter 5 of Fundamentals of Machine Learning for Predictive Data Analytics Chapter 5 Slides 'A' (internal) Chapter 5 Slides 'B' (internal) Slides are posted on the internal server http://131.96.197.204/~pmolnar/mlbook What is a metric? The "distance" $d$ between two points in a vector space must satisfy the following requirements: * It is non-negative: $d(x,y) \geq 0$ for all $x$, $y$, with $d(x,y) = 0$ if and only if $x = y$ * It is symmetric: $d(x,y) = d(y,x)$ * It satisfies the triangle inequality: $d(x,y) \leq d(x,z) + d(z,y)$ Some common measures of distance: Euclidean distance This is perhaps the most commonly used distance metric: $d(X,Y) = \sqrt{(X_0-Y_0)^2 + (X_1-Y_1)^2}$. End of explanation """ import scipy.spatial.distance as dst # Manhattan distance dst.cdist(np.expand_dims(X, axis=0),np.expand_dims(Y, axis=0),'cityblock') # Chebyshev distance dst.cdist(np.expand_dims(X, axis=0),np.expand_dims(Y, axis=0),'chebyshev') """ Explanation: More general Minkowski distance In a d-dimensional vector space, the Minkowski distance of order $p$ is defined as: $d_p(X,Y) = \left(\sum_{i=1}^{d} \left|X_i - Y_i\right|^p \right)^{1/p}$ The Euclidean distance is a special case of the Minkowski distance with $p=2$. 
Some other common cases include: The Manhattan distance: $p = 1$ The Chebyshev distance: $p = \infty$, where $d_\infty(X,Y) = \max_{i = 0,\ldots,n}\left| X_i - Y_i \right|$ End of explanation """ import pandas as pd from sklearn.neighbors import NearestNeighbors # read in the basketball draft dataset df = pd.read_csv('./Table5-2.csv', names=['ID','Speed','Agility','Draft'], skiprows=[0]) df.head() fig, ax = plt.subplots() ax.margins(0.05) groups = df.groupby('Draft') for name, group in groups: ax.plot(group.Speed, group.Agility, marker='o', linestyle='', ms=12, label=name); ax.legend(numpoints=1, loc='lower right'); """ Explanation: Scaling the Axes The Euclidean distance can be written in (suggestive) vector notation as: $d^2(X,Y) = (X-Y)^T I_{n \times n} (X-Y)$ Instead of the $n \times n$ identity matrix, we could use and positive definite matrix. A positive definite matrix is defined as a matrix $M$ for which $z^T M z \geq 0$ for all real vectors $z$, with equality only if $z$ is the vector of all zeros. We can use this matrix to appropriately rescale the axes, for example to correct for high variance along a given dimension in our feature space: this gives us the Mahalanobis metric, $d_M(X,Y) = (X-Y)^T \Sigma^{-1} (X-Y)$, where $\Sigma$ is the covariance matrix of your data points. Additional reading: Distances between words (taking into account the context): http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/ Let's try some clustering End of explanation """ # Let's fit a nearest-neighbor model to our data, using Euclidean distance... nearest_neighbor_model = NearestNeighbors(1, metric='euclidean').fit(df[['Speed','Agility']], df['Draft']) # OK, now let's find the nearest neighbors for some new data points! 
samples = np.array([[7,7],[5,5]]) # samples to classify, in [speed, agility] format fig, ax = plt.subplots() ax.margins(0.05) groups = df.groupby('Draft') for name, group in groups: ax.plot(group.Speed, group.Agility, marker='o', linestyle='', ms=12, label=name); ax.legend(numpoints=1, loc='lower right'); ax.plot(samples[:,0],samples[:,1], marker='o', linestyle='', ms=12, c='red'); nearest_neighbor_model.kneighbors(samples, return_distance=True) df.Draft.iloc[nearest_neighbor_model.kneighbors(samples, return_distance=False).ravel()] # the kneighbors method returns the index of the # nearest neighbors.... nearest_neighbor_model.kneighbors([[7,7],[5,4]], return_distance=False).ravel() """ Explanation: Nearest-neighbor clustering End of explanation """ from sklearn.neighbors import KNeighborsClassifier # define model and train it on the input data knn_model = KNeighborsClassifier(n_neighbors=5, metric='euclidean').fit(df[['Speed','Agility']], df['Draft']) # predict classes for "samples", using k nearest neighbors knn_model.predict(samples) from sklearn.cluster import KMeans kmeans_model = KMeans(2).fit(df[['Speed','Agility']]) df['Clust'] = kmeans_model.predict(df[['Speed','Agility']]) fig, ax = plt.subplots() ax.margins(0.05) groups = df.groupby('Clust') for name, group in groups: ax.plot(group.Speed, group.Agility, marker='o', linestyle='', ms=12, label=name); ax.legend(numpoints=1, loc='lower right'); ax.plot(samples[:,0],samples[:,1], marker='o', linestyle='', ms=12, c='red'); help(KMeans) """ Explanation: K-Nearest Neighbors Classifier The NearestNeighbors function helps us recover the neighbors that are closest to the desired data point; but if we're interested in using k-nearest neighbors for classification, we can use KNeighborsClassifier. End of explanation """
Kaggle/learntools
notebooks/feature_engineering_new/raw/tut_bonus.ipynb
apache-2.0
#$HIDE_INPUT$ import os import warnings from pathlib import Path import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns from IPython.display import display from pandas.api.types import CategoricalDtype from category_encoders import MEstimateEncoder from sklearn.cluster import KMeans from sklearn.decomposition import PCA from sklearn.feature_selection import mutual_info_regression from sklearn.model_selection import KFold, cross_val_score from xgboost import XGBRegressor # Set Matplotlib defaults plt.style.use("seaborn-whitegrid") plt.rc("figure", autolayout=True) plt.rc( "axes", labelweight="bold", labelsize="large", titleweight="bold", titlesize=14, titlepad=10, ) # Mute warnings warnings.filterwarnings('ignore') """ Explanation: Introduction Welcome to the feature engineering project for the House Prices - Advanced Regression Techniques competition! This competition uses nearly the same data you used in the exercises of the Feature Engineering course. We'll collect together the work you did into a complete project which you can build off of with ideas of your own. <blockquote style="margin-right:auto; margin-left:auto; background-color: #ebf9ff; padding: 1em; margin:24px;"> <strong>Fork This Notebook!</strong><br> Create your own editable copy of this notebook by clicking on the <strong>Copy and Edit</strong> button in the top right corner. </blockquote> Step 1 - Preliminaries Imports and Configuration We'll start by importing the packages we used in the exercises and setting some notebook defaults. 
Unhide this cell if you'd like to see the libraries we'll use: End of explanation """ def load_data(): # Read data data_dir = Path("../input/house-prices-advanced-regression-techniques/") df_train = pd.read_csv(data_dir / "train.csv", index_col="Id") df_test = pd.read_csv(data_dir / "test.csv", index_col="Id") # Merge the splits so we can process them together df = pd.concat([df_train, df_test]) # Preprocessing df = clean(df) df = encode(df) df = impute(df) # Reform splits df_train = df.loc[df_train.index, :] df_test = df.loc[df_test.index, :] return df_train, df_test """ Explanation: Data Preprocessing Before we can do any feature engineering, we need to preprocess the data to get it in a form suitable for analysis. The data we used in the course was a bit simpler than the competition data. For the Ames competition dataset, we'll need to: - Load the data from CSV files - Clean the data to fix any errors or inconsistencies - Encode the statistical data type (numeric, categorical) - Impute any missing values We'll wrap all these steps up in a function, which will make easy for you to get a fresh dataframe whenever you need. After reading the CSV file, we'll apply three preprocessing steps, clean, encode, and impute, and then create the data splits: one (df_train) for training the model, and one (df_test) for making the predictions that you'll submit to the competition for scoring on the leaderboard. 
End of explanation """ data_dir = Path("../input/house-prices-advanced-regression-techniques/") df = pd.read_csv(data_dir / "train.csv", index_col="Id") df.Exterior2nd.unique() """ Explanation: Clean Data Some of the categorical features in this dataset have what are apparently typos in their categories: End of explanation """ def clean(df): df["Exterior2nd"] = df["Exterior2nd"].replace({"Brk Cmn": "BrkComm"}) # Some values of GarageYrBlt are corrupt, so we'll replace them # with the year the house was built df["GarageYrBlt"] = df["GarageYrBlt"].where(df.GarageYrBlt <= 2010, df.YearBuilt) # Names beginning with numbers are awkward to work with df.rename(columns={ "1stFlrSF": "FirstFlrSF", "2ndFlrSF": "SecondFlrSF", "3SsnPorch": "Threeseasonporch", }, inplace=True, ) return df """ Explanation: Comparing these to data_description.txt shows us what needs cleaning. We'll take care of a couple of issues here, but you might want to evaluate this data further. End of explanation """ #$HIDE_INPUT$ # The numeric features are already encoded correctly (`float` for # continuous, `int` for discrete), but the categoricals we'll need to # do ourselves. Note in particular, that the `MSSubClass` feature is # read as an `int` type, but is actually a (nominative) categorical. 
# The nominative (unordered) categorical features features_nom = ["MSSubClass", "MSZoning", "Street", "Alley", "LandContour", "LotConfig", "Neighborhood", "Condition1", "Condition2", "BldgType", "HouseStyle", "RoofStyle", "RoofMatl", "Exterior1st", "Exterior2nd", "MasVnrType", "Foundation", "Heating", "CentralAir", "GarageType", "MiscFeature", "SaleType", "SaleCondition"] # The ordinal (ordered) categorical features # Pandas calls the categories "levels" five_levels = ["Po", "Fa", "TA", "Gd", "Ex"] ten_levels = list(range(10)) ordered_levels = { "OverallQual": ten_levels, "OverallCond": ten_levels, "ExterQual": five_levels, "ExterCond": five_levels, "BsmtQual": five_levels, "BsmtCond": five_levels, "HeatingQC": five_levels, "KitchenQual": five_levels, "FireplaceQu": five_levels, "GarageQual": five_levels, "GarageCond": five_levels, "PoolQC": five_levels, "LotShape": ["Reg", "IR1", "IR2", "IR3"], "LandSlope": ["Sev", "Mod", "Gtl"], "BsmtExposure": ["No", "Mn", "Av", "Gd"], "BsmtFinType1": ["Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ"], "BsmtFinType2": ["Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ"], "Functional": ["Sal", "Sev", "Maj1", "Maj2", "Mod", "Min2", "Min1", "Typ"], "GarageFinish": ["Unf", "RFn", "Fin"], "PavedDrive": ["N", "P", "Y"], "Utilities": ["NoSeWa", "NoSewr", "AllPub"], "CentralAir": ["N", "Y"], "Electrical": ["Mix", "FuseP", "FuseF", "FuseA", "SBrkr"], "Fence": ["MnWw", "GdWo", "MnPrv", "GdPrv"], } # Add a None level for missing values ordered_levels = {key: ["None"] + value for key, value in ordered_levels.items()} def encode(df): # Nominal categories for name in features_nom: df[name] = df[name].astype("category") # Add a None category for missing values if "None" not in df[name].cat.categories: df[name].cat.add_categories("None", inplace=True) # Ordinal categories for name, levels in ordered_levels.items(): df[name] = df[name].astype(CategoricalDtype(levels, ordered=True)) return df """ Explanation: Encode the Statistical Data Type Pandas has Python 
types corresponding to the standard statistical types (numeric, categorical, etc.). Encoding each feature with its correct type helps ensure each feature is treated appropriately by whatever functions we use, and makes it easier for us to apply transformations consistently. This hidden cell defines the encode function: End of explanation """ def impute(df): for name in df.select_dtypes("number"): df[name] = df[name].fillna(0) for name in df.select_dtypes("category"): df[name] = df[name].fillna("None") return df """ Explanation: Handle Missing Values Handling missing values now will make the feature engineering go more smoothly. We'll impute 0 for missing numeric values and "None" for missing categorical values. You might like to experiment with other imputation strategies. In particular, you could try creating "missing value" indicators: 1 whenever a value was imputed and 0 otherwise. End of explanation """ df_train, df_test = load_data() """ Explanation: Load Data And now we can call the data loader and get the processed data splits: End of explanation """ # Peek at the values #display(df_train) #display(df_test) # Display information about dtypes and missing values #display(df_train.info()) #display(df_test.info()) """ Explanation: Uncomment and run this cell if you'd like to see what they contain. Notice that df_test is missing values for SalePrice. (NAs were willed with 0's in the imputation step.) End of explanation """ #$HIDE_INPUT$ def score_dataset(X, y, model=XGBRegressor()): # Label encoding for categoricals # # Label encoding is good for XGBoost and RandomForest, but one-hot # would be better for models like Lasso or Ridge. The `cat.codes` # attribute holds the category levels. 
for colname in X.select_dtypes(["category"]): X[colname] = X[colname].cat.codes # Metric for Housing competition is RMSLE (Root Mean Squared Log Error) log_y = np.log(y) score = cross_val_score( model, X, log_y, cv=5, scoring="neg_mean_squared_error", ) score = -1 * score.mean() score = np.sqrt(score) return score """ Explanation: Establish Baseline Finally, let's establish a baseline score to judge our feature engineering against. Here is the function we created in Lesson 1 that will compute the cross-validated RMSLE score for a feature set. We've used XGBoost for our model, but you might want to experiment with other models. End of explanation """ X = df_train.copy() y = X.pop("SalePrice") baseline_score = score_dataset(X, y) print(f"Baseline score: {baseline_score:.5f} RMSLE") """ Explanation: We can reuse this scoring function anytime we want to try out a new feature set. We'll run it now on the processed data with no additional features and get a baseline score: End of explanation """ #$HIDE_INPUT$ def make_mi_scores(X, y): X = X.copy() for colname in X.select_dtypes(["object", "category"]): X[colname], _ = X[colname].factorize() # All discrete features should now have integer dtypes discrete_features = [pd.api.types.is_integer_dtype(t) for t in X.dtypes] mi_scores = mutual_info_regression(X, y, discrete_features=discrete_features, random_state=0) mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns) mi_scores = mi_scores.sort_values(ascending=False) return mi_scores def plot_mi_scores(scores): scores = scores.sort_values(ascending=True) width = np.arange(len(scores)) ticks = list(scores.index) plt.barh(width, scores) plt.yticks(width, ticks) plt.title("Mutual Information Scores") """ Explanation: This baseline score helps us to know whether some set of features we've assembled has actually led to any improvement or not. 
Step 2 - Feature Utility Scores In Lesson 2 we saw how to use mutual information to compute a utility score for a feature, giving you an indication of how much potential the feature has. This hidden cell defines the two utility functions we used, make_mi_scores and plot_mi_scores: End of explanation """ X = df_train.copy() y = X.pop("SalePrice") mi_scores = make_mi_scores(X, y) mi_scores """ Explanation: Let's look at our feature scores again: End of explanation """ def drop_uninformative(df, mi_scores): return df.loc[:, mi_scores > 0.0] """ Explanation: You can see that we have a number of features that are highly informative and also some that don't seem to be informative at all (at least by themselves). As we talked about in Tutorial 2, the top scoring features will usually pay-off the most during feature development, so it could be a good idea to focus your efforts on those. On the other hand, training on uninformative features can lead to overfitting. So, the features with 0.0 scores we'll drop entirely: End of explanation """ X = df_train.copy() y = X.pop("SalePrice") X = drop_uninformative(X, mi_scores) score_dataset(X, y) """ Explanation: Removing them does lead to a modest performance gain: End of explanation """ def label_encode(df): X = df.copy() for colname in X.select_dtypes(["category"]): X[colname] = X[colname].cat.codes return X """ Explanation: Later, we'll add the drop_uninformative function to our feature-creation pipeline. Step 3 - Create Features Now we'll start developing our feature set. To make our feature engineering workflow more modular, we'll define a function that will take a prepared dataframe and pass it through a pipeline of transformations to get the final feature set. It will look something like this: def create_features(df): X = df.copy() y = X.pop("SalePrice") X = X.join(create_features_1(X)) X = X.join(create_features_2(X)) X = X.join(create_features_3(X)) # ... 
return X Let's go ahead and define one transformation now, a label encoding for the categorical features: End of explanation """ #$HIDE_INPUT$ def mathematical_transforms(df): X = pd.DataFrame() # dataframe to hold new features X["LivLotRatio"] = df.GrLivArea / df.LotArea X["Spaciousness"] = (df.FirstFlrSF + df.SecondFlrSF) / df.TotRmsAbvGrd # This feature ended up not helping performance # X["TotalOutsideSF"] = \ # df.WoodDeckSF + df.OpenPorchSF + df.EnclosedPorch + \ # df.Threeseasonporch + df.ScreenPorch return X def interactions(df): X = pd.get_dummies(df.BldgType, prefix="Bldg") X = X.mul(df.GrLivArea, axis=0) return X def counts(df): X = pd.DataFrame() X["PorchTypes"] = df[[ "WoodDeckSF", "OpenPorchSF", "EnclosedPorch", "Threeseasonporch", "ScreenPorch", ]].gt(0.0).sum(axis=1) return X def break_down(df): X = pd.DataFrame() X["MSClass"] = df.MSSubClass.str.split("_", n=1, expand=True)[0] return X def group_transforms(df): X = pd.DataFrame() X["MedNhbdArea"] = df.groupby("Neighborhood")["GrLivArea"].transform("median") return X """ Explanation: A label encoding is okay for any kind of categorical feature when you're using a tree-ensemble like XGBoost, even for unordered categories. If you wanted to try a linear regression model (also popular in this competition), you would instead want to use a one-hot encoding, especially for the features with unordered categories. Create Features with Pandas This cell reproduces the work you did in Exercise 3, where you applied strategies for creating features in Pandas. Modify or add to these functions to try out other feature combinations. 
End of explanation """ #$HIDE_INPUT$ cluster_features = [ "LotArea", "TotalBsmtSF", "FirstFlrSF", "SecondFlrSF", "GrLivArea", ] def cluster_labels(df, features, n_clusters=20): X = df.copy() X_scaled = X.loc[:, features] X_scaled = (X_scaled - X_scaled.mean(axis=0)) / X_scaled.std(axis=0) kmeans = KMeans(n_clusters=n_clusters, n_init=50, random_state=0) X_new = pd.DataFrame() X_new["Cluster"] = kmeans.fit_predict(X_scaled) return X_new def cluster_distance(df, features, n_clusters=20): X = df.copy() X_scaled = X.loc[:, features] X_scaled = (X_scaled - X_scaled.mean(axis=0)) / X_scaled.std(axis=0) kmeans = KMeans(n_clusters=20, n_init=50, random_state=0) X_cd = kmeans.fit_transform(X_scaled) # Label features and join to dataset X_cd = pd.DataFrame( X_cd, columns=[f"Centroid_{i}" for i in range(X_cd.shape[1])] ) return X_cd """ Explanation: Here are some ideas for other transforms you could explore: - Interactions between the quality Qual and condition Cond features. OverallQual, for instance, was a high-scoring feature. You could try combining it with OverallCond by converting both to integer type and taking a product. - Square roots of area features. This would convert units of square feet to just feet. - Logarithms of numeric features. If a feature has a skewed distribution, applying a logarithm can help normalize it. - Interactions between numeric and categorical features that describe the same thing. You could look at interactions between BsmtQual and TotalBsmtSF, for instance. - Other group statistics in Neighboorhood. We did the median of GrLivArea. Looking at mean, std, or count could be interesting. You could also try combining the group statistics with other features. Maybe the difference of GrLivArea and the median is important? k-Means Clustering The first unsupervised algorithm we used to create features was k-means clustering. We saw that you could either use the cluster labels as a feature (a column with 0, 1, 2, ...) 
or you could use the distance of the observations to each cluster. We saw how these features can sometimes be effective at untangling complicated spatial relationships. End of explanation """ #$HIDE_INPUT$ def apply_pca(X, standardize=True): # Standardize if standardize: X = (X - X.mean(axis=0)) / X.std(axis=0) # Create principal components pca = PCA() X_pca = pca.fit_transform(X) # Convert to dataframe component_names = [f"PC{i+1}" for i in range(X_pca.shape[1])] X_pca = pd.DataFrame(X_pca, columns=component_names) # Create loadings loadings = pd.DataFrame( pca.components_.T, # transpose the matrix of loadings columns=component_names, # so the columns are the principal components index=X.columns, # and the rows are the original features ) return pca, X_pca, loadings def plot_variance(pca, width=8, dpi=100): # Create figure fig, axs = plt.subplots(1, 2) n = pca.n_components_ grid = np.arange(1, n + 1) # Explained variance evr = pca.explained_variance_ratio_ axs[0].bar(grid, evr) axs[0].set( xlabel="Component", title="% Explained Variance", ylim=(0.0, 1.0) ) # Cumulative Variance cv = np.cumsum(evr) axs[1].plot(np.r_[0, grid], np.r_[0, cv], "o-") axs[1].set( xlabel="Component", title="% Cumulative Variance", ylim=(0.0, 1.0) ) # Set up figure fig.set(figwidth=8, dpi=100) return axs """ Explanation: Principal Component Analysis PCA was the second unsupervised model we used for feature creation. We saw how it could be used to decompose the variational structure in the data. The PCA algorithm gave us loadings which described each component of variation, and also the components which were the transformed datapoints. The loadings can suggest features to create and the components we can use as features directly. 
Here are the utility functions from the PCA lesson: End of explanation """ #$HIDE_INPUT$ def pca_inspired(df): X = pd.DataFrame() X["Feature1"] = df.GrLivArea + df.TotalBsmtSF X["Feature2"] = df.YearRemodAdd * df.TotalBsmtSF return X def pca_components(df, features): X = df.loc[:, features] _, X_pca, _ = apply_pca(X) return X_pca pca_features = [ "GarageArea", "YearRemodAdd", "TotalBsmtSF", "GrLivArea", ] """ Explanation: And here are transforms that produce the features from the Exercise 5. You might want to change these if you came up with a different answer. End of explanation """ def corrplot(df, method="pearson", annot=True, **kwargs): sns.clustermap( df.corr(method), vmin=-1.0, vmax=1.0, cmap="icefire", method="complete", annot=annot, **kwargs, ) corrplot(df_train, annot=None) """ Explanation: These are only a couple ways you could use the principal components. You could also try clustering using one or more components. One thing to note is that PCA doesn't change the distance between points -- it's just like a rotation. So clustering with the full set of components is the same as clustering with the original features. Instead, pick some subset of components, maybe those with the most variance or the highest MI scores. For further analysis, you might want to look at a correlation matrix for the dataset: End of explanation """ def indicate_outliers(df): X_new = pd.DataFrame() X_new["Outlier"] = (df.Neighborhood == "Edwards") & (df.SaleCondition == "Partial") return X_new """ Explanation: Groups of highly correlated features often yield interesting loadings. PCA Application - Indicate Outliers In Exercise 5, you applied PCA to determine houses that were outliers, that is, houses having values not well represented in the rest of the data. You saw that there was a group of houses in the Edwards neighborhood having a SaleCondition of Partial whose values were especially extreme. 
Some models can benefit from having these outliers indicated, which is what this next transform will do. End of explanation """ #$HIDE_INPUT$ class CrossFoldEncoder: def __init__(self, encoder, **kwargs): self.encoder_ = encoder self.kwargs_ = kwargs # keyword arguments for the encoder self.cv_ = KFold(n_splits=5) # Fit an encoder on one split and transform the feature on the # other. Iterating over the splits in all folds gives a complete # transformation. We also now have one trained encoder on each # fold. def fit_transform(self, X, y, cols): self.fitted_encoders_ = [] self.cols_ = cols X_encoded = [] for idx_encode, idx_train in self.cv_.split(X): fitted_encoder = self.encoder_(cols=cols, **self.kwargs_) fitted_encoder.fit( X.iloc[idx_encode, :], y.iloc[idx_encode], ) X_encoded.append(fitted_encoder.transform(X.iloc[idx_train, :])[cols]) self.fitted_encoders_.append(fitted_encoder) X_encoded = pd.concat(X_encoded) X_encoded.columns = [name + "_encoded" for name in X_encoded.columns] return X_encoded # To transform the test data, average the encodings learned from # each fold. def transform(self, X): from functools import reduce X_encoded_list = [] for fitted_encoder in self.fitted_encoders_: X_encoded = fitted_encoder.transform(X) X_encoded_list.append(X_encoded[self.cols_]) X_encoded = reduce( lambda x, y: x.add(y, fill_value=0), X_encoded_list ) / len(X_encoded_list) X_encoded.columns = [name + "_encoded" for name in X_encoded.columns] return X_encoded """ Explanation: You could also consider applying some sort of robust scaler from scikit-learn's sklearn.preprocessing module to the outlying values, especially those in GrLivArea. Here is a tutorial illustrating some of them. Another option could be to create a feature of "outlier scores" using one of scikit-learn's outlier detectors. Target Encoding Needing a separate holdout set to create a target encoding is rather wasteful of data. 
In Tutorial 6 we used 25% of our dataset just to encode a single feature, Zipcode. The data from the other features in that 25% we didn't get to use at all. There is, however, a way you can use target encoding without having to use held-out encoding data. It's basically the same trick used in cross-validation: 1. Split the data into folds, each fold having two splits of the dataset. 2. Train the encoder on one split but transform the values of the other. 3. Repeat for all the splits. This way, training and transformation always take place on independent sets of data, just like when you use a holdout set but without any data going to waste. In the next hidden cell is a wrapper you can use with any target encoder: End of explanation """ def create_features(df, df_test=None): X = df.copy() y = X.pop("SalePrice") mi_scores = make_mi_scores(X, y) # Combine splits if test data is given # # If we're creating features for test set predictions, we should # use all the data we have available. After creating our features, # we'll recreate the splits. 
if df_test is not None: X_test = df_test.copy() X_test.pop("SalePrice") X = pd.concat([X, X_test]) # Lesson 2 - Mutual Information X = drop_uninformative(X, mi_scores) # Lesson 3 - Transformations X = X.join(mathematical_transforms(X)) X = X.join(interactions(X)) X = X.join(counts(X)) # X = X.join(break_down(X)) X = X.join(group_transforms(X)) # Lesson 4 - Clustering # X = X.join(cluster_labels(X, cluster_features, n_clusters=20)) # X = X.join(cluster_distance(X, cluster_features, n_clusters=20)) # Lesson 5 - PCA X = X.join(pca_inspired(X)) # X = X.join(pca_components(X, pca_features)) # X = X.join(indicate_outliers(X)) X = label_encode(X) # Reform splits if df_test is not None: X_test = X.loc[df_test.index, :] X.drop(df_test.index, inplace=True) # Lesson 6 - Target Encoder encoder = CrossFoldEncoder(MEstimateEncoder, m=1) X = X.join(encoder.fit_transform(X, y, cols=["MSSubClass"])) if df_test is not None: X_test = X_test.join(encoder.transform(X_test)) if df_test is not None: return X, X_test else: return X df_train, df_test = load_data() X_train = create_features(df_train) y_train = df_train.loc[:, "SalePrice"] score_dataset(X_train, y_train) """ Explanation: Use it like: encoder = CrossFoldEncoder(MEstimateEncoder, m=1) X_encoded = encoder.fit_transform(X, y, cols=["MSSubClass"])) You can turn any of the encoders from the category_encoders library into a cross-fold encoder. The CatBoostEncoder would be worth trying. It's similar to MEstimateEncoder but uses some tricks to better prevent overfitting. Its smoothing parameter is called a instead of m. Create Final Feature Set Now let's combine everything together. Putting the transformations into separate functions makes it easier to experiment with various combinations. The ones I left uncommented I found gave the best results. You should experiment with you own ideas though! Modify any of these transformations or come up with some of your own to add to the pipeline. 
End of explanation """ X_train = create_features(df_train) y_train = df_train.loc[:, "SalePrice"] xgb_params = dict( max_depth=6, # maximum depth of each tree - try 2 to 10 learning_rate=0.01, # effect of each tree - try 0.0001 to 0.1 n_estimators=1000, # number of trees (that is, boosting rounds) - try 1000 to 8000 min_child_weight=1, # minimum number of houses in a leaf - try 1 to 10 colsample_bytree=0.7, # fraction of features (columns) per tree - try 0.2 to 1.0 subsample=0.7, # fraction of instances (rows) per tree - try 0.2 to 1.0 reg_alpha=0.5, # L1 regularization (like LASSO) - try 0.0 to 10.0 reg_lambda=1.0, # L2 regularization (like Ridge) - try 0.0 to 10.0 num_parallel_tree=1, # set > 1 for boosted random forests ) xgb = XGBRegressor(**xgb_params) score_dataset(X_train, y_train, xgb) """ Explanation: Step 4 - Hyperparameter Tuning At this stage, you might like to do some hyperparameter tuning with XGBoost before creating your final submission. End of explanation """ X_train, X_test = create_features(df_train, df_test) y_train = df_train.loc[:, "SalePrice"] xgb = XGBRegressor(**xgb_params) # XGB minimizes MSE, but competition loss is RMSLE # So, we need to log-transform y to train and exp-transform the predictions xgb.fit(X_train, np.log(y)) predictions = np.exp(xgb.predict(X_test)) output = pd.DataFrame({'Id': X_test.index, 'SalePrice': predictions}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!") """ Explanation: Just tuning these by hand can give you great results. However, you might like to try using one of scikit-learn's automatic hyperparameter tuners. Or you could explore more advanced tuning libraries like Optuna or scikit-optimize. 
Here is how you can use Optuna with XGBoost: ``` import optuna def objective(trial): xgb_params = dict( max_depth=trial.suggest_int("max_depth", 2, 10), learning_rate=trial.suggest_float("learning_rate", 1e-4, 1e-1, log=True), n_estimators=trial.suggest_int("n_estimators", 1000, 8000), min_child_weight=trial.suggest_int("min_child_weight", 1, 10), colsample_bytree=trial.suggest_float("colsample_bytree", 0.2, 1.0), subsample=trial.suggest_float("subsample", 0.2, 1.0), reg_alpha=trial.suggest_float("reg_alpha", 1e-4, 1e2, log=True), reg_lambda=trial.suggest_float("reg_lambda", 1e-4, 1e2, log=True), ) xgb = XGBRegressor(**xgb_params) return score_dataset(X_train, y_train, xgb) study = optuna.create_study(direction="minimize") study.optimize(objective, n_trials=20) xgb_params = study.best_params ``` Copy this into a code cell if you'd like to use it, but be aware that it will take quite a while to run. After it's done, you might enjoy using some of Optuna's visualizations. Step 5 - Train Model and Create Submissions Once you're satisfied with everything, it's time to create your final predictions! This cell will: - create your feature set from the original data - train XGBoost on the training data - use the trained model to make predictions from the test set - save the predictions to a CSV file End of explanation """
greenelab/GCB535
23_Prelab_Python-I/Python-I-Prelab.ipynb
bsd-3-clause
print "I am Python code! Press Shift+Enter to run me!" """ Explanation: Lesson 1: Introduction to Python Table of contents How to use this notebook Introduction Writing your first script The print statement Variables and data types Basic math Commenting code Test your understanding: practice set 1 1. How to use this notebook This is an interactive Jupyter notebook. Throughout the lesson you will see blocks of Python code with "In [ ]" to the left of them. If you click on these blocks and press Shift + Enter, the code will be run and the output will be printed below the block. In order to get credit for this pre-lab, you must run each code box. Try it out with this code block here: End of explanation """ print "I am a string. I am enclosed in quotes." print 123 print 2934.454 """ Explanation: Many of the code blocks in this notebook will start off in the "un-run" state, so make sure you run them to see what the output is! You can also edit these code blocks yourself and then re-run them to see what changes. This is a great way to learn more about programming, so feel free to do this to any of the code blocks in the lesson. As you go through these exercises, you might run into issues where pressing "Shift+Enter" doesn't work correctly and you can't get your code to run. This can happen with the platform we are using, the IPython notebook. The solution is to go to the menu bar underneath the 'Jupyter' logo, select 'Kernel', and then select 'Restart.' In our experience, this typically will do the trick to get the notebook running again! 2. Introduction Why learn Python? It's a particularly simple and easy to learn for beginners It's widely used by the scientific community Languages such as Perl and R are also quite popular among scientists, and are worth learning. Luckily, once you learn your first programming language, it's usually much easier to pick up additional languages! 3. 
Writing and running Python code A note about Python versions For all of the Python exercises in this course, we are using Python 2, which is the second version of the Python language. Python 3 has been released, but many of the same principles apply. It is important to know which version of a programming language you are using, especially in the case of Python, as the syntax has changed slightly between these two versions! There are several different ways you can write and run Python code. Here are the two ways we'll cover in this course: Programs and scripts: This is the most common method. Code is written in a .py file and then executed on the command line. We'll talk more about this later. Jupyter / IPython notebooks: That's what you're using right now! These notebooks allow you to write small (or large) blocks of code and immediately see the result. They also let you intersperse text and images with your code, which makes them very useful as research notebooks or as vignettes to share with others. We'll start off just writing code in Jupyter notebooks. For a quick introduction to how they work, click Help in the menu above, and then User interface tour. Try this now In the code block below, write the following and then run it: print "Hello world!" ^^^ Write your code here and press Shift+Enter to run! ^^^ 4. The print statement In the exercise above, we typed the following line of code: print "Hello world!" This is called a print statement. Its purpose is to print data to the terminal screen (or notebook output block, if applicable). If the data that we want to print is a line of text (called a string in the programming world), we must enclose the text in quotes. If we forget to do this, Python will likely give an error (more on why this occurs in the next section!). Note that the quotes around the string will not actually be printed -- they are not considered part of the string, they just demarcate where the string starts and ends. 
If the data we want to print is a number, we do not have to use quotes. Python recognizes numbers as a distinct type of data from strings. In fact, if you enclose your numbers in quotes, Python will start treating them like strings rather than numbers (which is sometimes what we want, but usually not). Let's look at a few examples: End of explanation """ print 1 + 2 print 2 * 3 """ Explanation: With numbers, we can also do basic math operations. For example: End of explanation """ print "1" + "2" """ Explanation: Here's where it becomes very important to be aware if you are using a string or a number. Let's see what happens when we try to add two numbers enclosed in quotes: End of explanation """ print "Hello" + "world" print "Space " + "added" """ Explanation: What happened here? Basically, whenever you try to "add" two strings, Python does something called concatenation -- merging them into one string. Python doesn't check whether the string holds a number, so it doesn't even try to "add" them in the traditional sense. Concatenation works for any strings, and is good for when we want to combine multiple strings into one: End of explanation """ print "5" + 5 """ Explanation: What happens if we try to combine a string and a number? End of explanation """ print "1 + 2 = ", 1 + 2, "!" """ Explanation: This is not allowed! Remember, Python doesn't consider "5" a number when it's enclosed in quotes -- it's just any old string. Python doesn't know how to combine a string and a number, so it gives an error message. If we want to print multiple types of data in the same line, we can use a comma instead of a plus sign. This tells Python, "don't try to combine these data -- just print them all separately to the screen!". For example: End of explanation """ geneID = "Fmr1" print geneID """ Explanation: Another version-related note This print statement is an example of how the syntax changed between Python 2 and 3. 
In Python 3, print statements are enclosed in parentheses, so this command would look like print("1 + 2 = ", 1 + 2, "!"), but if you try to run that in Python 2, your output will include the parentheses. This isn't so important to remember for this class since we are always using Python 2, but if you ever use Python 3, this is worth remembering! [ Check yourself! ] Print practice Think you got it? Using the code block below, write code to print your name. 5. Variables and data types What is a variable? You can think of variables as little boxes that we put data in. You name each box so that you can refer to it and use it in your code. This gives your code flexibility, for reasons you will see soon. Creating a variable is sometimes called "declaring" or "defining" a variable. This basically just requires giving the variable a name and assigning it an initial value. A variable usually needs to be defined in this way before you can use it elsewhere in your code (although there are a few exceptions we'll go over later). Here's an example of a variable definition: geneID = "Fmr1" Here, geneID is the variable name and "Fmr1" is the piece of data that is being stored in the variable. The = is what we call an assignment operator. In English, we might read this line of code as "store the string 'Fmr1' in the variable 'geneID'". Important to note: You should try not to think of = in Python as the same kind of equals sign you use in math. In math, a = implies equality of the information on either side. In Python, = simply means "assign the value on the right to the variable on the left". It may help to think of it more like an arrow pointing to the left, e.g. geneID &lt;- "Fmr1". What should I name my variables? You can name your variables almost anything, but there are a few important rules and conventions to keep in mind. 
Rules (if you break these, you will get errors): - only letters, numbers, and underscores can be used in a variable name - the variable name can not begin with a number - you can not use any of the python reserved words as a variable name - the capitalization of your variables matters. For example, geneID and geneid would be considered different variables. Conventions (recommended, but not required): - begin a variable name with a lower case letter - use a name that is descriptive of the info stored in the variable - if your variable name is more than one word squished together, use camelCase or under_scores to make it easier to read. Some examples: | Good | Bad | |:---: |:---: | | geneID | 3rdColumn (illegal) | | personCount | sdfsxwcnq (gibberish) | | input_file | person# (illegal) | | avgGeneCount | class (reserved word) | If you have proper syntax highlighting in your text editor, it should be obvious when you accidentally use a reserved word because it will be a different color than all your other variables! If not, a full list of reserved words can be found here: PyDocs Variables in action Below are several examples of code using variables. For each code block, first try to guess what the output will be, and then run the block to see the answer. A short explanation of what happened in each code block follows. End of explanation """ apples = 5 oranges = 10 fruit = apples + oranges print fruit """ Explanation: When we print a variable, Python knows that what we're really interested in is what's stored in the variable, not the variable itself. Therefore, Python automatically prints the contents of the variable when we do this kind of print statement. End of explanation """ apples = 5 oranges = 10 print apples + oranges """ Explanation: When two variables contain numbers, we can add them together as if they were the numbers themselves. Another important thing to note here is that in an assignment statement, everything on the right of the = will happen first. 
So in line 3, the apples + oranges part happens first, and the result is stored in fruit. End of explanation """ apples = 5 oranges = 10 print apples, oranges """ Explanation: Note that the addition is done before printing, and only the result is printed (much like adding literal numbers, as we saw before). End of explanation """ apples = 5 oranges = 10 print "I have", apples, "apples" """ Explanation: As we saw before, the comma is basically allows us to list multiple things we would like to print at once, without trying to add or concatenate them. A space is automatically inserted between each item. End of explanation """ people = 3 people = people + 1 print people """ Explanation: As above, we can mix and match different data types in our print statements when we use a comma. End of explanation """ people = 3 animals = 4 people = animals print people print animals """ Explanation: This is an important one to understand. What we did here was overwrite the value of people with the value of people + 1. The important thing to remember is that the right side of the = sign is evaluated first. So first Python figures out what people + 1 is, which is 3 + 1 or 4. Once that is completely done, it takes that result and stores it in the variable on the left. In this case, this overwrites the value that was already in people. Later on, this is how we will create counters, i.e. variables that we increment by 1 every time something happens. End of explanation """ name = "Joe Shmo" age = 20 print name,"will be",(age + 1),"next year" """ Explanation: Another example of overwriting a variable. Note that the value of animals is unchanged. End of explanation """ yourAge = "16" print "You will be", (yourAge + 1), "next year" """ Explanation: Here we just did some simple math within the print statement. End of explanation """ geneName = "Actb" readCount = 10375 """ Explanation: What happened? We put the number 16 in quotes--this makes it a string instead of a number! 
As we discussed above, Python can't add a string to a number, so it gives an error message. (You may also notice that it starts to print the message, but fails when we try to do the addition! Sometimes this can help you track down where errors are occuring.) [ Check yourself! ] Variable practice Think you got it? In the code block below, there are two variables. Write one additional line of code to print both variables on the same line. End of explanation """ print 1 + 1 print "1" + "1" print 1 + "1" """ Explanation: Data types Data comes in many types: numbers, words, letters, etc. In Python, certain types of data are treated differently. There are four main "data types" we'll be working with: String - a string is just another word for text. You can think of it as "a string of letters/characters". Strings are enclosed in double or single quotes to distinguish them from variables and commands (ex: "This is a string!" 'So is this!') Integer ("int") - this refers to whole numbers (same as in real life). In programming, integers are handled differently than non-integers, which is why we make this distinction. Floating point numbers ("float") - numbers with decimals. Booleans – True or False (1 or 0). We'll talk more about this later. As we've seen, different types of data are treated differently by Python: End of explanation """ print 1 + int("1") print "1" + str(1) print float("1") """ Explanation: Sometimes, like in the last example above, we'd actually like to convert one data type into another. Fortunately, Python provides simple built-in functions that allow us to do this in certain cases. Here's a partial list of the most useful conversion function: str() - converts a variable or piece of data to a string. Works on integers, floats, and booleans (and others). int() - converts a variable or piece of data to an integer. Works strings made up only of numbers (e.g. "123"), floats (decimal part will be truncated), and booleans (True converts to 1, False to 0). 
float() - converts a variable or piece of data to a float (decimal). Works on strings made up only of numbers (e.g. "123.45"), integers (a .0 will be added), and booleans (True converts to 1.0, False to 0.0) Let's look at a few example: End of explanation """ age = "16" print float(age) age = "16" print int(age) + 1 """ Explanation: A variable takes on the "type" of whatever data it is currently storing. So a variable holding a string has the string type, and a variable holding an integer has the integer type. Thus, we can apply the type conversion functions to variables as well: End of explanation """ print 2 + 2 print 5 - 10 print 5 * 5 print 10 / 2 """ Explanation: 6. Basic Math Math in Python uses many of the symbols and conventions you're already used to from traditional mathematics: End of explanation """ print 2 + 5 * 5 print (2 + 5) * 5 """ Explanation: Order of operations (P.E.M.D.A.S.) is maintained: End of explanation """ print 5 ** 2 """ Explanation: There are also a few notations that you might not be familiar with. Exponents: End of explanation """ print 5 % 2 """ Explanation: Remainder (aka modulus or "mod"): End of explanation """ print 6 / 2 print 5 / 2 print 5 / 3 print 5 / 4 """ Explanation: However, the most important difference to watch out for is integer division. Run the following examples: End of explanation """ print 5.0 / 2 print float(5) / 2 print 5 / float(2) """ Explanation: As you can see, only 6 / 2 gave the correct answer -- all the other answers appear as if they've been rounded down. Why is this? Basically, it is a somewhat odd rule in Python that whenever you divide two integers, Python always returns an integer answer. If the answer should have a decimal component, the decimal is simply truncated (e.g. 2.5 is truncated to 2). To get a proper answer, at least one of the numbers being divided must be a float: End of explanation """ # This is a comment line! It won't be printed! 
print "Hello, friend" # Use comments to leave notes on what your code is doing print "Nice day we're having, isn't it?" # ...or to temporarily prevent certain lines of code from executing # print 1 + "2" + holy illegal operations, batman! + 42.8g$ # You can put them almost anywhere! print "Well, goodbye then" # even here! """ Explanation: This is a very common source of errors, so always keep it in mind when you divide! When in doubt, convert one number with float(). 7. Commenting code Our final topic for the day is a simple but important one: commenting your code. A "comment" is a line of code that Python ignores when it executes your code. We mark these lines by starting them with a hash/pound sign (#). Here's an example: End of explanation """ """ This here is a multi-line comment. Make sure to end it with matching quotes! """ print "Aye aye!" """ Explanation: You can also make comments that span multiple lines, using triple quotes (""") like so: End of explanation """ print "what's", "up" print "what's" + "up" print "I have", 5, "cats" print "I have" + 5 + "cats" print 9 - 6 * 2 print (9 - 6) * 2 print 24 % 6 print 24 % 7 print -3 ** 2 print 9 / 2 print 9.0 / 2 print 9 / float(2) x = 5 print x * 3 x = "5" print x * 3 x = "5" print int(x) * 3 x = "cat" y = x print y x = "cat" y = "dog" x = y y = x print y x = "cat" y = "dog" print x + y x = 5 x = 1 print x x = 5 x + 1 print x x = 2 y = 4 print (x * y) ** x x = 16 print x ** 0.5 """ Explanation: When should I use comments? Comments are meant to improve the understandability of your code to another person (and possibly yourself in the future). Use them whenever you think a piece of code might be particularly confusing to a reader. You can also use them to "section" your code. One strategy is to write comments first, before the code, and use them as an outline for the structure of the code. One mistake beginners make is to actually comment too much. 
You don't need to comment things that are standard and obvious, just the parts that are most likely to be confusing. Most importantly: always keep your comments up to date! Inaccurate comments are worse than no comments at all, because they mislead the reader and can cause false assumptions. If you make major changes to your code, always check your comments to make sure they have not become inaccurate. 8. Test your understanding: practice set 1 For the following blocks of code, first try to guess what the output will be, and then run the code yourself. These examples may introduce some ideas and common pitfalls that were not explicitly covered in the text above, so be sure to complete this section. End of explanation """
flohorovicic/pynoddy
docs/notebooks/Training_Set_3.ipynb
gpl-2.0
%matplotlib inline # here the usual imports. If any of the imports fails, # make sure that pynoddy is installed # properly, ideally with 'python setup.py develop' # or 'python setup.py install' import sys, os import matplotlib.pyplot as plt import numpy as np # adjust some settings for matplotlib from matplotlib import rcParams # print rcParams rcParams['font.size'] = 15 # determine path of repository to set paths corretly below repo_path = os.path.realpath('../..') sys.path.append(repo_path) import pynoddy import pynoddy.history import pynoddy.experiment import importlib importlib.reload(pynoddy.experiment) rcParams.update({'font.size': 15}) # From notebook 4/ Traning Set example 1: importlib.reload(pynoddy.history) importlib.reload(pynoddy.events) nm = pynoddy.history.NoddyHistory() # add stratigraphy strati_options = {'num_layers' : 3, 'layer_names' : ['layer 1', 'layer 2', 'layer 3'], 'layer_thickness' : [1500, 500, 1500]} nm.add_event('stratigraphy', strati_options ) # The following options define the fault geometry: fault_options = {'name' : 'Fault_E', 'pos' : (4000, 0, 5000), 'dip_dir' : 90., 'dip' : 60, 'slip' : 1000} nm.add_event('fault', fault_options) history = 'normal_fault.his' output_name = 'normal_fault_out' nm.write_history(history) """ Explanation: Generate Training sets Based on "Reproducible Experiments" notebook End of explanation """ importlib.reload(pynoddy.history) importlib.reload(pynoddy.experiment) from pynoddy.experiment import monte_carlo ue = pynoddy.experiment.Experiment(history) ue.change_cube_size(100) ue.plot_section('y') """ Explanation: Initiate experiment with this input file: End of explanation """ ue.freeze() """ Explanation: Before we start to draw random realisations of the model, we should first store the base state of the model for later reference. 
This is simply possibel with the freeze() method which stores the current state of the model as the "base-state": End of explanation """ ue.set_random_seed(12345) """ Explanation: We now intialise the random generator. We can directly assign a random seed to simplify reproducibility (note that this is not essential, as it would be for the definition in a script function: the random state is preserved within the model and could be retrieved at a later stage, as well!): End of explanation """ ue.info(events_only = True) ev2 = ue.events[2] ev2.properties """ Explanation: The next step is to define probability distributions to the relevant event parameters. Let's first look at the different events: End of explanation """ param_stats = [{'event' : 2, 'parameter': 'Slip', 'stdev': 300.0, 'type': 'normal'}, {'event' : 2, 'parameter': 'Dip', 'stdev': 10.0, 'type': 'normal'},] ue.set_parameter_statistics(param_stats) resolution = 100 ue.change_cube_size(resolution) tmp = ue.get_section('y') prob_2 = np.zeros_like(tmp.block[:,:,:]) n_draws = 10 for i in range(n_draws): ue.random_draw() tmp = ue.get_section('y', resolution = resolution) prob_2 += (tmp.block[:,:,:] == 2) # Normalise prob_2 = prob_2 / float(n_draws) fig = plt.figure(figsize = (12,8)) ax = fig.add_subplot(111) ax.imshow(prob_2.transpose()[:,0,:], origin = 'lower left', interpolation = 'none') plt.title("Estimated probability of unit 4") plt.xlabel("x (E-W)") plt.ylabel("z") """ Explanation: Next, we define the probability distributions for the uncertain input parameters: End of explanation """ ue.random_draw() s1 = ue.get_section('y') s1.block.shape s1.block[np.where(s1.block == 3)] = 1 s1.plot_section('y', cmap='Greys') """ Explanation: This example shows how the base module for reproducible experiments with kinematics can be used. For further specification, child classes of Experiment can be defined, and we show examples of this type of extension in the next sections. 
Adjustments to generate training set First step: generate more layers and randomly select layers to visualise: End of explanation """ nm = pynoddy.history.NoddyHistory() # add stratigraphy n_layers = 8 strati_options['num_layers'] = n_layers strati_options['layer_names'] = [] strati_options['layer_thickness'] = [] for n in range(n_layers): strati_options['layer_names'].append("layer %d" % n) strati_options['layer_thickness'].append(5000./n_layers) nm.add_event('stratigraphy', strati_options ) # The following options define the fault geometry: fault_options = {'name' : 'Fault_E', 'pos' : (1000, 0, 5000), 'dip_dir' : 90., 'dip' : 60, 'slip' : 500} nm.add_event('fault', fault_options) history = 'normal_fault.his' output_name = 'normal_fault_out' nm.write_history(history) importlib.reload(pynoddy.history) importlib.reload(pynoddy.experiment) from pynoddy.experiment import monte_carlo ue = pynoddy.experiment.Experiment(history) ue.freeze() ue.set_random_seed(12345) ue.set_extent(2800, 100, 2800) ue.change_cube_size(50) ue.plot_section('y') param_stats = [{'event' : 2, 'parameter': 'Slip', 'stdev': 100.0, 'type': 'lognormal'}, {'event' : 2, 'parameter': 'Dip', 'stdev': 10.0, 'type': 'normal'}, # {'event' : 2, # 'parameter': 'Y', # 'stdev': 150.0, # 'type': 'normal'}, {'event' : 2, 'parameter': 'X', 'stdev': 150.0, 'type': 'normal'},] ue.set_parameter_statistics(param_stats) # randomly select layers: ue.random_draw() s1 = ue.get_section('y') # create "feature" model: f1 = s1.block.copy() # randomly select layers: f1 = np.squeeze(f1) # n_featuers: number of "features" -> gray values in image n_features = 5 vals = np.random.randint(0,255,size=n_features) for n in range(n_layers): f1[f1 == n] = np.random.choice(vals) f1.shape plt.imshow(f1.T, origin='lower_left', cmap='Greys', interpolation='nearest') # blur image from scipy import ndimage f2 = ndimage.filters.gaussian_filter(f1, 1, mode='nearest') plt.imshow(f2.T, origin='lower_left', cmap='Greys', interpolation='nearest', 
vmin=0, vmax=255) # randomly swap image if np.random.randint(2) == 1: f2 = f2[::-1,:] plt.imshow(f2.T, origin='lower_left', cmap='Greys', interpolation='nearest', vmin=0, vmax=255) """ Explanation: Idea: generate many layers, then randomly extract a couple of these and also assign different density/ color values: End of explanation """ # back to before: re-initialise model: nm = pynoddy.history.NoddyHistory() # add stratigraphy n_layers = 18 strati_options['num_layers'] = n_layers strati_options['layer_names'] = [] strati_options['layer_thickness'] = [] for n in range(n_layers): strati_options['layer_names'].append("layer %d" % n) strati_options['layer_thickness'].append(5000./n_layers) nm.add_event('stratigraphy', strati_options ) # The following options define the fault geometry: fault_options = {'name' : 'Fault_E', 'pos' : (1000, 0, 5000), 'dip_dir' : 90., 'dip' : 60, 'slip' : 500} nm.add_event('fault', fault_options) history = 'normal_fault.his' output_name = 'normal_fault_out' nm.write_history(history) from pynoddy.experiment import monte_carlo ue = pynoddy.experiment.Experiment(history) ue.freeze() ue.set_random_seed(12345) ue.set_extent(2800, 100, 2800) ue.change_cube_size(50) param_stats = [{'event' : 2, 'parameter': 'Slip', 'stdev': 100.0, 'type': 'lognormal'}, {'event' : 2, 'parameter': 'Dip', 'stdev': 10.0, 'type': 'normal'}, # {'event' : 2, # 'parameter': 'Y', # 'stdev': 150.0, # 'type': 'normal'}, {'event' : 2, 'parameter': 'X', 'stdev': 150.0, 'type': 'normal'},] ue.set_parameter_statistics(param_stats) """ Explanation: All in one function Generate images for normal faults End of explanation """ n_train = 10000 F_train = np.empty((n_train, 28*28)) ue.change_cube_size(100) for i in range(n_train): # randomly select layers: ue.random_draw() s1 = ue.get_section('y') # create "feature" model: f1 = s1.block.copy() # randomly select layers: f1 = np.squeeze(f1) # n_featuers: number of "features" -> gray values in image n_features = 4 vals = 
np.random.randint(0,255,size=n_features) for n in range(n_layers): f1[f1 == n+1] = np.random.choice(vals) f1 = f1.T f2 = ndimage.filters.gaussian_filter(f1, 0, mode='nearest') # scale image f2 = f2 - np.min(f2) if np.max(f2) != 0: f2 = f2/np.max(f2)*255 # randomly swap image if np.random.randint(2) == 1: f2 = f2[::-1,:] F_train[i] = f2.flatten().T plt.imshow(f2, origin='lower_left', cmap='Greys', interpolation='nearest', vmin=0, vmax=255) import matplotlib.pyplot as plt %matplotlib inline fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True, figsize=(12,6)) ax = ax.flatten() for i in range(10): img = F_train[i].reshape(28, 28) ax[i].imshow(img, cmap='Greys', interpolation='nearest') ax[0].set_xticks([]) ax[0].set_yticks([]) plt.tight_layout() # plt.savefig('./figures/mnist_all.png', dpi=300) plt.show() import pickle f = open("f_train_normal.pkl", 'wb') pickle.dump(F_train, f) """ Explanation: Generate training set for normal faults: End of explanation """ # back to before: re-initialise model: nm = pynoddy.history.NoddyHistory() # add stratigraphy n_layers = 18 strati_options['num_layers'] = n_layers strati_options['layer_names'] = [] strati_options['layer_thickness'] = [] for n in range(n_layers): strati_options['layer_names'].append("layer %d" % n) strati_options['layer_thickness'].append(5000./n_layers) nm.add_event('stratigraphy', strati_options ) # The following options define the fault geometry: fault_options = {'name' : 'Fault_E', 'pos' : (1000, 0, 5000), 'dip_dir' : 90., 'dip' : 60, 'slip' : -500} nm.add_event('fault', fault_options) history = 'normal_fault.his' output_name = 'normal_fault_out' nm.write_history(history) reload(pynoddy.history) reload(pynoddy.experiment) from pynoddy.experiment import monte_carlo ue = pynoddy.experiment.Experiment(history) ue.freeze() ue.set_random_seed(12345) ue.set_extent(2800, 100, 2800) ue.change_cube_size(50) param_stats = [{'event' : 2, 'parameter': 'Slip', 'stdev': -100.0, 'type': 'lognormal'}, {'event' : 
2, 'parameter': 'Dip', 'stdev': 10.0, 'type': 'normal'}, # {'event' : 2, # 'parameter': 'Y', # 'stdev': 150.0, # 'type': 'normal'}, {'event' : 2, 'parameter': 'X', 'stdev': 150.0, 'type': 'normal'},] ue.set_parameter_statistics(param_stats) n_train = 10000 F_train_rev = np.empty((n_train, 28*28)) ue.change_cube_size(100) for i in range(n_train): # randomly select layers: ue.random_draw() s1 = ue.get_section('y') # create "feature" model: f1 = s1.block.copy() # randomly select layers: f1 = np.squeeze(f1) # n_featuers: number of "features" -> gray values in image n_features = vals = np.random.randint(0,255,size=n_features) for n in range(n_layers): f1[f1 == n+1] = np.random.choice(vals) f1 = f1.T f2 = ndimage.filters.gaussian_filter(f1, 0, mode='nearest') # scale image f2 = f2 - np.min(f2) if np.max(f2) != 0: f2 = f2/np.max(f2)*255 # randomly swap image if np.random.randint(2) == 1: f2 = f2[::-1,:] F_train_rev[i] = f2.flatten().T fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True, figsize=(12,6)) ax = ax.flatten() for i in range(10): img = F_train_rev[i].reshape(28, 28) ax[i].imshow(img, cmap='Greys', interpolation='nearest') ax[0].set_xticks([]) ax[0].set_yticks([]) plt.tight_layout() # plt.savefig('./figures/mnist_all.png', dpi=300) plt.show() pickle.dump(F_train_rev, open("f_train_reverse.pkl", 'w')) """ Explanation: Generate reverse faults And now: the same for reverse faults: End of explanation """ l1 = np.empty_like(s1.block[:,0,:]) n_layers = 18 for i in range(l1.shape[0]): l1[:,i] = i l1_ori = np.floor(l1*n_layers/l1.shape[0]) F_train_line = np.empty((n_train, 28*28)) for i in range(n_train): n_features = 4 vals = np.random.randint(0,255,size=n_features) l1 = l1_ori.copy() for n in range(n_layers): l1[l1 == n+1] = np.random.choice(vals) f1 = l1.T f2 = ndimage.filters.gaussian_filter(f1, 0, mode='nearest') # scale image f2 = f2 - np.min(f2) if np.max(f2) != 0: f2 = f2/np.max(f2)*255 F_train_line[i] = f2.flatten().T fig, ax = 
plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True, figsize=(12,6)) ax = ax.flatten() for i in range(10): img = F_train_line[i].reshape(28, 28) ax[i].imshow(img, cmap='Greys', interpolation='nearest') ax[0].set_xticks([]) ax[0].set_yticks([]) plt.tight_layout() # plt.savefig('./figures/mnist_all.png', dpi=300) plt.show() pickle.dump(F_train_line, open("f_train_line.pkl", 'w')) """ Explanation: Generate simple layer structure No need for noddy, in this simple case - just adapt a numpy array: End of explanation """
AlphaGit/deep-learning
sentiment-rnn/Sentiment_RNN.ipynb
mit
# Flattened notebook: sentiment analysis of movie reviews with an
# embedding layer + LSTM RNN in TensorFlow 1.x.  Reads raw text, encodes
# words as integers, pads/truncates to fixed-length sequences, then
# builds, trains and evaluates the graph.
import numpy as np
import tensorflow as tf
with open('../sentiment-network/reviews.txt', 'r') as f:
    reviews = f.read()
with open('../sentiment-network/labels.txt', 'r') as f:
    labels = f.read()
reviews[:2000]
""" Explanation: Sentiment Analysis with an RNN In this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedfoward network is more accurate since we can include information about the sequence of words. Here we'll use a dataset of movie reviews, accompanied by labels. The architecture for this network is shown below. <img src="assets/network_diagram.png" width=400px> Here, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on it's own. From the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function. We don't care about the sigmoid outputs except for the very last one, we can ignore the rest. We'll calculate the cost from the output of the last step and the training label. End of explanation """
from string import punctuation
all_text = ''.join([c for c in reviews if c not in punctuation])
reviews = all_text.split('\n')
all_text = ' '.join(reviews)
words = all_text.split()
all_text[:2000]
words[:100]
""" Explanation: Data preprocessing The first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit. You can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines \n. To deal with those, I'm going to split the text into each review using \n as the delimiter. Then I can combined all the reviews back together into one big string. First, let's remove all punctuation. Then get all the text without the newlines and split it into individual words. End of explanation """
# Create your dictionary that maps vocab words to integers here
# (ids start at 1 so that 0 can be used for padding later on)
unique_words = list(set(words))
vocab_to_int = { word: (idx + 1) for idx, word in enumerate(unique_words) }
# Convert the reviews to integers, same shape as reviews list, but with integers
reviews_ints = [ [ vocab_to_int[w] for w in r.split() ] for r in reviews ]
""" Explanation: Encoding the words The embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network. Exercise: Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers start at 1, not 0. Also, convert the reviews to integers and store the reviews in a new list called reviews_ints. End of explanation """
# Convert labels to 1s and 0s for 'positive' and 'negative'
labels = [ 1 if l == 'positive' else 0 for l in labels.split('\n') ]
""" Explanation: Encoding the labels Our labels are "positive" or "negative". To use these labels in our network, we need to convert them to 0 and 1. Exercise: Convert labels from positive and negative to 1 and 0, respectively. End of explanation """
from collections import Counter
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
""" Explanation: If you built labels correctly, you should see the next output. End of explanation """
# Filter out that review with 0 length -- and drop the matching labels
# as well, otherwise reviews and labels fall out of alignment after the
# removal (only reviews_ints was filtered before).
keep = [ii for ii, r in enumerate(reviews_ints) if len(r) > 0]
labels = [labels[ii] for ii in keep]
reviews_ints = [reviews_ints[ii] for ii in keep]
""" Explanation: Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 characters. Exercise: First, remove the review with zero length from the reviews_ints list. End of explanation """
# left-pad with zeros to seq_len, truncate longer reviews to seq_len words
seq_len = 200
features = [ [0] * (seq_len - len(r)) + r[:seq_len] for r in reviews_ints ]
""" Explanation: Exercise: Now, create an array features that contains the data we'll pass to the network. The data should come from review_ints, since we want to feed integers to the network. Each row should be 200 elements long. For reviews shorter than 200 words, left pad with 0s. That is, if the review is ['best', 'movie', 'ever'], [117, 18, 128] as integers, the row will look like [0, 0, 0, ..., 0, 117, 18, 128]. For reviews longer than 200, use on the first 200 words as the feature vector. This isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data. End of explanation """
features[:10][:100]
""" Explanation: If you build features correctly, it should look like that cell output below. End of explanation """
split_frac = 0.8
train_x, val_x = [ np.array(features[:int(len(features)*split_frac)]),
                   np.array(features[int(len(features)*split_frac):]) ]
train_y, val_y = [ np.array(labels[:int(len(labels)*split_frac)]),
                   np.array(labels[int(len(labels)*split_frac):]) ]
# split the held-out 20% in half: validation / test
val_x, test_x = [ val_x[:int(len(val_x) / 2)], val_x[int(len(val_x) / 2):] ]
val_y, test_y = [ val_y[:int(len(val_y) / 2)], val_y[int(len(val_y) / 2):] ]
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
      "\nValidation set: \t{}".format(val_x.shape),
      "\nTest set: \t\t{}".format(test_x.shape))
""" Explanation: Training, Validation, Test With our data in nice shape, we'll split it into training, validation, and test sets. Exercise: Create the training, validation, and test sets here. You'll need to create sets for the features and the labels, train_x and train_y for example. Define a split fraction, split_frac as the fraction of data to keep in the training set. Usually this is set to 0.8 or 0.9. The rest of the data will be split in half to create the validation and testing data. End of explanation """
lstm_size = 256
lstm_layers = 1
batch_size = 500
learning_rate = 0.001
""" Explanation: With train, validation, and text fractions of 0.8, 0.1, 0.1, the final shapes should look like: Feature Shapes: Train set: (20000, 200) Validation set: (2500, 200) Test set: (2500, 200) Build the graph Here, we'll build the graph. First up, defining the hyperparameters. lstm_size: Number of units in the hidden layers in the LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc. lstm_layers: Number of LSTM layers in the network. I'd start with 1, then add more if I'm underfitting. batch_size: The number of reviews to feed the network in one training pass. Typically this should be set as high as you can go without running out of memory. learning_rate: Learning rate End of explanation """
# word ids run from 1..len(vocab_to_int) and 0 is the padding id, so the
# embedding matrix needs len + 1 rows (len alone makes the largest id an
# out-of-range lookup).  The old "vocab = sorted(counts, ...)" line was a
# leftover referencing an undefined name (counts) and has been removed.
n_words = len(vocab_to_int) + 1
# Create the graph object
graph = tf.Graph()
# Add nodes to the graph
with graph.as_default():
    inputs_ = tf.placeholder(tf.int32, [None, None], name="inputs")
    labels_ = tf.placeholder(tf.int32, [None, None], name="labels")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
""" Explanation: For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability. Exercise: Create the inputs_, labels_, and drop out keep_prob placeholders using tf.placeholder. labels_ needs to be two-dimensional to work with some functions later. Since keep_prob is a scalar (a 0-dimensional tensor), you shouldn't provide a size to tf.placeholder. End of explanation """
# Size of the embedding vectors (number of units in the embedding layer)
embed_size = 300
with graph.as_default():
    embedding = tf.Variable(tf.random_uniform((n_words, embed_size), -1, 1), name="embedding")
    embed = tf.nn.embedding_lookup(embedding, inputs_, name="embed")
""" Explanation: Embedding Now we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights. Exercise: Create the embedding lookup matrix as a tf.Variable. Use that embedding matrix to get the embedded vectors to pass to the LSTM cell with tf.nn.embedding_lookup. This function takes the embedding matrix and an input tensor, such as the review vectors. Then, it'll return another tensor with the embedded vectors. So, if the embedding layer has 200 units, the function will return a tensor with size [batch_size, 200]. End of explanation """
with graph.as_default():
    # Your basic LSTM cell
    lstm = tf.nn.rnn_cell.BasicLSTMCell(lstm_size)
    # Add dropout to the cell
    drop = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=keep_prob)
    # Stack up multiple LSTM layers, for deep learning
    cell = tf.nn.rnn_cell.MultiRNNCell([drop] * lstm_layers)
    # Getting an initial state of all zeros
    initial_state = cell.zero_state(batch_size, tf.float32)
""" Explanation: LSTM cell <img src="assets/network_diagram.png" width=400px> Next, we'll create our LSTM cells to use in the recurrent network (TensorFlow documentation). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph. To create a basic LSTM cell for the graph, you'll want to use tf.contrib.rnn.BasicLSTMCell. Looking at the function documentation: tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=&lt;function tanh at 0x109f1ef28&gt;) you can see it takes a parameter called num_units, the number of units in the cell, called lstm_size in this code. So then, you can write something like lstm = tf.contrib.rnn.BasicLSTMCell(num_units) to create an LSTM cell with num_units. Next, you can add dropout to the cell with tf.contrib.rnn.DropoutWrapper. This just wraps the cell in another cell, but with dropout added to the inputs and/or outputs. It's a really convenient way to make your network better with almost no effort! So you'd do something like drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob) Most of the time, your network will have better performance with more layers. That's sort of the magic of deep learning, adding more layers allows the network to learn really complex relationships. Again, there is a simple way to create multiple layers of LSTM cells with tf.contrib.rnn.MultiRNNCell: cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers) Here, [drop] * lstm_layers creates a list of cells (drop) that is lstm_layers long. The MultiRNNCell wrapper builds this into multiple layers of RNN cells, one for each cell in the list. So the final cell you're using in the network is actually multiple (or just one) LSTM cells with dropout. But it all works the same from an achitectural viewpoint, just a more complicated graph in the cell. Exercise: Below, use tf.contrib.rnn.BasicLSTMCell to create an LSTM cell. Then, add drop out to it with tf.contrib.rnn.DropoutWrapper. Finally, create multiple LSTM layers with tf.contrib.rnn.MultiRNNCell. Here is a tutorial on building RNNs that will help you out. End of explanation """
with graph.as_default():
    outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)
""" Explanation: RNN forward pass <img src="assets/network_diagram.png" width=400px> Now we need to actually run the data through the RNN nodes. You can use tf.nn.dynamic_rnn to do this. You'd pass in the RNN cell you created (our multiple layered LSTM cell for instance), and the inputs to the network. outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state) Above I created an initial state, initial_state, to pass to the RNN. This is the cell state that is passed between the hidden layers in successive time steps. tf.nn.dynamic_rnn takes care of most of the work for us. We pass in our cell and the input to the cell, then it does the unrolling and everything else for us. It returns outputs for each time step and the final_state of the hidden layer. Exercise: Use tf.nn.dynamic_rnn to add the forward pass through the RNN. Remember that we're actually passing in vectors from the embedding layer, embed. End of explanation """
with graph.as_default():
    # only the last time step feeds the sigmoid output unit
    predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
    cost = tf.contrib.losses.mean_squared_error(labels_, predictions)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
""" Explanation: Output We only care about the final output, we'll be using that as our sentiment prediction. So we need to grab the last output with outputs[:, -1], the calculate the cost from that and labels_. End of explanation """
with graph.as_default():
    correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
""" Explanation: Validation accuracy Here we can add a few nodes to calculate the accuracy which we'll use in the validation pass. End of explanation """
def get_batches(x, y, batch_size=100):
    """Yield (x, y) slices of size batch_size, dropping the remainder."""
    n_batches = len(x)//batch_size
    x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]
    for ii in range(0, len(x), batch_size):
        yield x[ii:ii+batch_size], y[ii:ii+batch_size]
""" Explanation: Batching This is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the x and y arrays and returns slices out of those arrays with size [batch_size]. End of explanation """
for a in get_batches(train_x, train_y, batch_size):
    print(np.max(a[0]))
epochs = 10
with graph.as_default():
    saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    iteration = 1
    for e in range(epochs):
        state = sess.run(initial_state)
        for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):
            feed = {inputs_: x,
                    labels_: y[:, None],
                    keep_prob: 0.5,
                    initial_state: state}
            loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
            if iteration%5==0:
                print("Epoch: {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Train loss: {:.3f}".format(loss))
            if iteration%25==0:
                val_acc = []
                val_state = sess.run(cell.zero_state(batch_size, tf.float32))
                for x, y in get_batches(val_x, val_y, batch_size):
                    feed = {inputs_: x,
                            labels_: y[:, None],
                            keep_prob: 1,
                            initial_state: val_state}
                    batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
                    val_acc.append(batch_acc)
                print("Val acc: {:.3f}".format(np.mean(val_acc)))
            iteration +=1
    saver.save(sess, "checkpoints/sentiment.ckpt")
""" Explanation: Training Below is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the checkpoints directory exists. End of explanation """
test_acc = []
with tf.Session(graph=graph) as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    test_state = sess.run(cell.zero_state(batch_size, tf.float32))
    for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):
        feed = {inputs_: x,
                labels_: y[:, None],
                keep_prob: 1,
                initial_state: test_state}
        batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
        test_acc.append(batch_acc)
    print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
""" Explanation: Testing End of explanation """
bollwyvl/ip-bootstrap
docs/Icon.ipynb
bsd-3-clause
# Flattened notebook: demo of the ipbs (IPython-bootstrap) Icon widget,
# driven by FontAwesome glyphs.  Bare expressions (fa.space_shuttle,
# icon, ...) are notebook display cells, not no-ops.
from IPython.html import widgets
from ipbs.widgets import Icon
import ipbs.bootstrap as bs
from ipbs.icons import FontAwesome, Size
""" Explanation: Icon End of explanation """
# registry object exposing every FontAwesome glyph as an attribute
fa = FontAwesome()
""" Explanation: First, grab a FontAwesome instance which knows about all of the icons. End of explanation """
fa.space_shuttle
""" Explanation: fa exposes Python-friendly, autocompletable names for all of the FontAwesome icons, and you can preview them immediately. End of explanation """
# icons support transform attributes (rotate_270) and scaling via `*`
fa.space_shuttle.rotate_270 * 3
""" Explanation: You can apply effects like rotation and scaling. End of explanation """
# a single glyph wrapped in the Icon widget
icon = Icon(fa.space_shuttle)
icon
""" Explanation: The actual widget supports the stack case, such that you can display a single icon... End of explanation """
# stacked icons: an enlarged square behind an inverted glyph, size 3x
icon = Icon(fa.square * 2, fa.empire.context_inverse, size=Size.x3)
icon
""" Explanation: Or several icons stacked together... End of explanation """
cstrelioff/ARM-ipynb
Chapter3/chptr3.1.ipynb
mit
from __future__ import print_function, division %matplotlib inline import matplotlib import numpy as np import pandas as pd import matplotlib.pyplot as plt # use matplotlib style sheet plt.style.use('ggplot') # import statsmodels for R-style regression import statsmodels.formula.api as smf """ Explanation: 3.1: One predictor End of explanation """ kidiq = pd.read_stata("../../ARM_Data/child.iq/kidiq.dta") kidiq.head() """ Explanation: Read the data Data are in the child.iq directory of the ARM_Data download-- you might have to change the path I use below to reflect the path on your computer. End of explanation """ fit0 = smf.ols('kid_score ~ mom_hs', data=kidiq).fit() print(fit0.summary()) """ Explanation: First regression-- binary predictor, Pg 31 Fit the regression using the non-jittered data End of explanation """ fig0, ax0 = plt.subplots(figsize=(8, 6)) hs_linspace = np.linspace(kidiq['mom_hs'].min(), kidiq['mom_hs'].max(), 50) # default color cycle colors = plt.rcParams['axes.color_cycle'] # plot points plt.scatter(kidiq['mom_hs'], kidiq['kid_score'], s=60, alpha=0.5, c=colors[1]) # add fit plt.plot(hs_linspace, fit0.params[0] + fit0.params[1] * hs_linspace, lw=3, c=colors[1]) plt.xlabel("Mother completed high school") plt.ylabel("Child test score") """ Explanation: Plot Figure 3.1, Pg 32 A note for the python version: I have not included jitter, in the vertical or horizontal directions. Instead, the data is plotted with opacity so the regions with high data-density can be distinguished. 
End of explanation """ fit1 = smf.ols('kid_score ~ mom_iq', data=kidiq).fit() print(fit1.summary()) """ Explanation: Second regression -- continuous predictor, Pg 32 End of explanation """ fig1, ax1 = plt.subplots(figsize=(8, 6)) iq_linspace = np.linspace(kidiq['mom_iq'].min(), kidiq['mom_iq'].max(), 50) # default color cycle colors = plt.rcParams['axes.color_cycle'] # plot points plt.scatter(kidiq['mom_iq'], kidiq['kid_score'], s=60, alpha=0.5, c=colors[1]) # add fit plt.plot(iq_linspace, fit1.params[0] + fit1.params[1] * iq_linspace, lw=3, c=colors[1]) plt.xlabel("Mother IQ score") plt.ylabel("Child test score") """ Explanation: Figure 3.2, Pg 33 End of explanation """
antonpetkoff/learning
text-mining/TM_lab08_MLP_Reg.ipynb
gpl-3.0
import tensorflow as tf import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_20newsgroups from nltk import TweetTokenizer from tensorflow.keras import layers from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.utils import to_categorical from tensorflow.keras.models import Model from tensorflow.keras.losses import categorical_crossentropy from tensorflow.keras import regularizers, initializers def next_batch(x_, y_, batch_size, ids = None): if (ids is None): # Random sample from the dataset. It can be sequential (but must be shuffled) within epoch, that will guarantee that you'll use all the data. # The two approaches are practically equal when using a large number of epochs. ids = np.random.choice(x_.shape[0], batch_size, replace=False) feed_dict = { 'x': x_[ids], 'y': y_[ids] } return feed_dict def tweet_tokenize(text): tknzr = TweetTokenizer(preserve_case=True, strip_handles=True) return tknzr.tokenize(text) def evalute_accuracy(x, y): return sess.run(accuracy, feed_dict = next_batch(x, y, len(x))) hparams = tf.contrib.training.HParams( batch_size = 32, max_epochs = 100, max_features = 1000, learning_rate = 0.03, reg_param = 0.03, dropout_keep_prob = 0.9, use_dropout = False, use_early_stoppoing = True, early_stopping_patience = 3, use_l2_reg = False, layers = 2, seed = 42 ) """ Explanation: Text mining Anton Petkov, 25915, Sofia University Lab 08 Multi-Layer Perceptron on Reuters with reguarization techniques We are going to train a Neural Network to predict the origin of a document coming from the 20newsgroup dataset. For this puprose we'll use Tensorflow, and sklearn. Your job is to fill in the missing code into the cells below. You will find the steps you need to perform in the Task section in each cell. Homework Tasks Load and preprocess the data for 20newsgroups Create a multi layer perceptron (MLP) with N hidden layers with ReLU activation, and an Softmax output layer. 
Compute it's gradients (manually or using tensorflow's API) Compile the model with 'categorical_crossentropy' loss, and add metric 'accuracy' Fill the dropout logic (it must be per layer) Fill the l2 regularization logic (it must be per layer) Fill the logic for early stopping Fix the plotting function plot_history() Submission You must submit your code with experiments: 1. Different number of layers 2. Compare l2, droput and early stopping 3. Learning curves for train and test metrics (acc, loss) per each experiment End of explanation """ print('Loading data...') # Passing none as we want to train over all the data. newsgroups_train = fetch_20newsgroups(subset='train', categories=None) newsgroups_test = fetch_20newsgroups(subset='test', categories=None) print('Data loaded.') """ Explanation: Data Loading We are going to use the 20newsgroup dataset for multi-class text classification with Tensorflow. First we use the fetch_20newgroup module from sklearn. End of explanation """ num_classes = np.max(newsgroups_train.target) + 1 print(num_classes, 'classes') print('Vectorizing sequence data...') tokenizer = Tokenizer(num_words=hparams.max_features) tokenizer.fit_on_texts(newsgroups_train.data) x_train = tokenizer.texts_to_matrix(newsgroups_train.data, mode='binary') x_test = tokenizer.texts_to_matrix(newsgroups_test.data, mode='binary') print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) print('Convert class vector to binary class matrix ' '(for use with categorical_crossentropy)') y_train = to_categorical(newsgroups_train.target, num_classes) y_test = to_categorical(newsgroups_test.target, num_classes) print('y_train shape:', y_train.shape) print('y_test shape:', y_test.shape) """ Explanation: Preprocessing In this paragraph you need to pre-process your data and create vectors suitable for feeding the NN. You can try different transorfmations and features, TFIDF would be a good start. 
You can use:< Tokenizer from Keras, and to convert the list in newsgrops_*.data into BOW (Bag-Of-Words) vectors. Convert the labels to OneHot encoded vectors e.g. for label '2' your vector should look like this [0, 0, ..., 1, 0] helpers can be found here; Expected output 20 classes Vectorizing sequence data... x_train shape: (11314, max_features) x_test shape: (7532, max_features) Convert class vector to binary class matrix (for use with categorical_crossentropy) y_train shape: (11314, 20) y_test shape: (7532, 20) End of explanation """ def create_model(hparams): input_layer = layers.Input(shape=(hparams.max_features,), name='input') hidden = input_layer for i in range(hparams.layers): #create layers hidden = layers.Dense( 128, activation='relu', kernel_regularizer=regularizers.l2(hparams.reg_param) if hparams.use_l2_reg else None, kernel_initializer=initializers.glorot_normal(seed=hparams.seed), name='dense-{}'.format(i) )(hidden) if hparams.use_dropout: #use hparams.dropout_keep_prob and add dropout mask hidden = layers.Dropout(rate=1 - hparams.dropout_keep_prob)(hidden) # Softmax over classes for ouput output_layer = layers.Dense( num_classes, activation='softmax', kernel_regularizer=regularizers.l2(hparams.reg_param) if hparams.use_l2_reg else None, kernel_initializer=initializers.glorot_normal(seed=hparams.seed), name='output' )(hidden) if hparams.use_dropout: #use hparams.dropout_keep_prob and add dropout mask output_layer = layers.Dropout(rate=1 - hparams.dropout_keep_prob)(output_layer) model = Model(inputs=[input_layer], outputs=output_layer) # Minimize error using cross entropy model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc']) model.summary() return model model = create_model(hparams) """ Explanation: Model building For the architecture you can refer to the picture below. You can find detailed overview of backprop here. You can find detailed overview of regularization here. 
End of explanation """ def train_model(model, hparams): full_history = { 'loss': [[], []], 'acc': [[], []] } patience = 0 best_test_loss = np.inf best_epoch = 0 # Training cycle for epoch in range(hparams.max_epochs): history = model.fit( x=x_train, y=y_train, batch_size=hparams.batch_size, epochs=1, shuffle=True ) train_loss = history.history['loss'][0] train_acc = history.history['acc'][0] test_loss, test_acc = model.evaluate( x=x_test, y=y_test, batch_size=hparams.batch_size ) full_history['loss'][0].append(train_loss) full_history['loss'][1].append(test_loss) full_history['acc'][0].append(train_acc) full_history['acc'][1].append(test_acc) if hparams.use_early_stoppoing: if test_loss < best_test_loss: best_test_loss = test_loss best_epoch = epoch else: if patience < hparams.early_stopping_patience: patience = patience + 1 else: print('best epoch to stop is: {} with loss: {}'.format(best_epoch, best_test_loss)) break print("Optimization Finished!") return full_history history = train_model(model, hparams) def visualize_history(history, key='loss'): plt.plot(history[key][0]) plt.plot(history[key][1]) plt.title('model {}'.format(key)) plt.ylabel(key) plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') return plt visualize_history(history, key='loss').show() visualize_history(history, key='acc').show() def run_experiment(hparams, title='Experiment'): print('RUNNING EXPERIMENT: {}'.format(title)) model = create_model(hparams) history = train_model(model, hparams) visualize_history(history, key='loss').show() visualize_history(history, key='acc').show() final_test_loss = history['loss'][1][-1] final_test_acc = history['acc'][1][-1] print('Final test loss: {}'.format(final_test_loss)) print('Final test accuracy: {}'.format(final_test_acc)) """ Explanation: Model training In this section you'll only need to run the cells, you don't have to modify them! 
End of explanation """ run_experiment(tf.contrib.training.HParams( batch_size = 32, max_epochs = 100, max_features = 1000, learning_rate = 0.03, reg_param = 0.03, dropout_keep_prob = 0.9, use_dropout = False, use_early_stoppoing = False, early_stopping_patience = 1, use_l2_reg = True, layers = 2, seed = 42 ), title="1) 2 Layers, L2") run_experiment(tf.contrib.training.HParams( batch_size = 32, max_epochs = 100, max_features = 1000, learning_rate = 0.03, reg_param = 0.03, dropout_keep_prob = 0.9, use_dropout = True, use_early_stoppoing = False, early_stopping_patience = 1, use_l2_reg = False, layers = 2, seed = 42 ), title="2) 2 Layers, Dropout") run_experiment(tf.contrib.training.HParams( batch_size = 32, max_epochs = 100, max_features = 1000, learning_rate = 0.03, reg_param = 0.03, dropout_keep_prob = 0.9, use_dropout = False, use_early_stoppoing = True, early_stopping_patience = 3, use_l2_reg = False, layers = 2, seed = 42 ), title="3) 2 Layers, Early Stopping") run_experiment(tf.contrib.training.HParams( batch_size = 32, max_epochs = 100, max_features = 1000, learning_rate = 0.03, reg_param = 0.03, dropout_keep_prob = 0.9, use_dropout = True, use_early_stoppoing = False, early_stopping_patience = 3, use_l2_reg = True, layers = 2, seed = 42 ), title="4) 2 Layers, L2, Dropout") run_experiment(tf.contrib.training.HParams( batch_size = 32, max_epochs = 100, max_features = 1000, learning_rate = 0.03, reg_param = 0.03, dropout_keep_prob = 0.9, use_dropout = True, use_early_stoppoing = True, early_stopping_patience = 3, use_l2_reg = False, layers = 3, seed = 42 ), title="5) 3 Layers, Dropout, Early Stopping") run_experiment(tf.contrib.training.HParams( batch_size = 32, max_epochs = 100, max_features = 1000, learning_rate = 0.03, reg_param = 0.03, dropout_keep_prob = 0.9, use_dropout = False, use_early_stoppoing = True, early_stopping_patience = 3, use_l2_reg = True, layers = 3, seed = 42 ), title="6) 3 Layers, L2, Early Stopping") 
run_experiment(tf.contrib.training.HParams( batch_size = 32, max_epochs = 100, max_features = 1000, learning_rate = 0.03, reg_param = 0.03, dropout_keep_prob = 0.9, use_dropout = True, use_early_stoppoing = True, early_stopping_patience = 3, use_l2_reg = True, layers = 3, seed = 42 ), title="7) 3 Layers, L2, Dropout, Early Stopping") """ Explanation: Experiments All experiments were carried out with the following parameters set to: batch_size = 32 max_epochs = 100 max_features = 1000 learning_rate = 0.03 reg_param = 0.03 dropout_keep_prob = 0.9 seed = 42 The number of neurons in each layer is 128. A grid search on all these parameters would be too big, so we emphasize on Layer Count, using L2 Regularization, using Dropout or using Early Stopping. Table with results | Experiment # | Layer count | L2 Regularization | Dropout | Early Stopping | Accuracy | Loss | Comment | |-|-|-|-|-|-|-|-| | 1 | 2 | yes | no | no | 0.2493 | 2.8602 | | | 2 | 2 | no | yes | no | 0.0426 | NaN | gradient exploded| | 3 | 2 | no | no | yes | 0.6279 | 1.7183 | stopped at first epoch with patience 1 (also with patience 3) | | 4 | 2 | yes | yes | no | 0.1251 | 2.8817 | | | 5 | 3 | no | yes | yes | 0.6480 | 1.5360 | best result | | 6 | 3 | yes | no | yes | 0.1225 | 2.8889 | stopped at epoch 10 | | 7 | 3 | yes | yes | yes | 0.0528 | 2.9902 | | Analysis Early Stopping is very efficient, because it is computationally cheaper than Dropout and L2 Regularization and also early stopping doesn't modify the model itself. Thus, it is simpler than Dropout and L2. I expected better results with L2, but it has a very low accuracy score and even after 100 epochs the loss doesn't seem to improve. The best result is in bold and uses Early Stopping and Dropout with 3 layers. Each experiment has a graph with the loss and accuracy plotted for each epoch for both test and train datasets. End of explanation """
phenology/infrastructure
applications/notebooks/examples/python/connect_to_spark.ipynb
apache-2.0
#Add all dependencies to PYTHON_PATH import sys sys.path.append("/usr/lib/spark/python") sys.path.append("/usr/lib/spark/python/lib/py4j-0.10.4-src.zip") sys.path.append("/usr/lib/python3/dist-packages") #Define environment variables import os os.environ["HADOOP_CONF_DIR"] = "/etc/hadoop/conf" os.environ["PYSPARK_PYTHON"] = "python3" os.environ["PYSPARK_DRIVER_PYTHON"] = "ipython" #Load PySpark to connect to a Spark cluster from pyspark import SparkConf, SparkContext """ Explanation: Connect to Spark In this NoteBook the reader finds the code to connect to Spark and create a SparkContext. Dependencies End of explanation """ appName = "connect_to_spark" masterURL="spark://pheno0.phenovari-utwente.surf-hosted.nl:7077" #A context needs to be created if it does not already exist try: sc.stop() except NameError: print("A new Spark Context will be created.") sc = SparkContext(conf = SparkConf().setAppName(appName).setMaster(masterURL)) """ Explanation: Create Spark Context End of explanation """ sc.stop() """ Explanation: Close Spark Context End of explanation """
jpilgram/phys202-2015-work
project/NeuralNetworks.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt from IPython.html.widgets import interact from sklearn.datasets import load_digits digits = load_digits() print(digits.data.shape) def show_digit(i): plt.matshow(digits.images[i]); interact(show_digit, i=(0,100)); """ Explanation: Neural Networks This project was created by Brian Granger. All content is licensed under the MIT License. Introduction Neural networks are a class of algorithms that can learn how to compute the value of a function given previous examples of the functions output. Because neural networks are capable of learning how to compute the output of a function based on existing data, they generally fall under the field of Machine Learning. Let's say that we don't know how to compute some function $f$: $$ f(x) \rightarrow y $$ But we do have some data about the output that $f$ produces for particular input $x$: $$ f(x_1) \rightarrow y_1 $$ $$ f(x_2) \rightarrow y_2 $$ $$ \ldots $$ $$ f(x_n) \rightarrow y_n $$ A neural network learns how to use that existing data to compute the value of the function $f$ on yet unseen data. Neural networks get their name from the similarity of their design to how neurons in the brain work. Work on neural networks began in the 1940s, but significant advancements were made in the 1970s (backpropagation) and more recently, since the late 2000s, with the advent of deep neural networks. These days neural networks are starting to be used extensively in products that you use. A great example of the application of neural networks is the recently released Flickr automated image tagging. With these algorithms, Flickr is able to determine what tags ("kitten", "puppy") should be applied to each photo, without human involvement. 
In this case the function takes an image as input and outputs a set of tags for that image: $$ f(image) \rightarrow {tag_1, \ldots} $$ For the purpose of this project, good introductions to neural networks can be found at: The Nature of Code, Daniel Shiffman. Neural Networks and Deep Learning, Michael Nielsen. Data Science from Scratch, Joel Grus The Project Your general goal is to write Python code to predict the number associated with handwritten digits. The dataset for these digits can be found in sklearn: End of explanation """ digits.target """ Explanation: The actual, known values (0,1,2,3,4,5,6,7,8,9) associated with each image can be found in the target array: End of explanation """
ewulczyn/talk_page_abuse
src/analysis/Characterizing Context of Attacks.ipynb
apache-2.0
%load_ext autoreload %autoreload 2 %matplotlib inline import warnings warnings.filterwarnings('ignore') import matplotlib.pyplot as plt import seaborn as sns import numpy as np import pandas as pd from load_utils import * d = load_diffs() df_events, df_blocked_user_text = load_block_events_and_users() """ Explanation: Characterizing Context of Attacks: Are attacks isolated events, or do they occur in series? Are the product of provocateurs or a toxic environment? Do they occur on certain topics ? Is toxic behaviour reciprocated ? End of explanation """ pairs = d['2015'].query('not own_page and not author_anon and not recipient_anon')\ .groupby(['user_text', 'page_title'], as_index = False)['pred_aggression_score']\ .agg({'aggresssivness': np.mean, 'count': len})\ .query('count > 5')\ .assign(key = lambda x: 'From:' + x['user_text'] + ' to:' + x['page_title'], partner_key = lambda x: 'From:' + x['page_title'] + ' to:' + x['user_text'] ) pairs = pairs.merge(pairs, left_on = 'partner_key', right_on = 'key', how = 'inner' ) sns.jointplot(x = 'aggresssivness_x', y = 'aggresssivness_y', data = pairs) t_angry = np.percentile(pairs['aggresssivness_x'], 95) t_friendly = np.percentile(pairs['aggresssivness_y'], 5) sns.distplot(pairs.query('aggresssivness_x > %f' % t_angry)['aggresssivness_y'], hist=False, label = 'Angry A->B') sns.distplot(pairs.query('aggresssivness_x < %f' % t_friendly)['aggresssivness_y'], hist=False, label = 'Friendly A->B') plt.xlabel('Aggresiveness B->A') """ Explanation: Q: Is tone reciprocal? Methodology 1: is the average aggression score of what A says on B's page related to the average score of what B says on A's page? 
End of explanation """ cols = ['user_text', 'page_title', 'pred_aggression_score', 'rev_timestamp', 'rev_id'] ab = d['2015'].query('not own_page and not author_anon and not recipient_anon')[cols] ba = ab.copy().rename(columns = {'user_text': 'page_title', 'page_title': 'user_text'})[cols] micro_pairs = ab.merge(ba, on = ['user_text', 'page_title'], how = 'inner' )\ .assign(delta = lambda x: x['rev_timestamp_x'] - x['rev_timestamp_y'])\ .assign(delta_positive = lambda x: x.delta > pd.Timedelta('0 seconds'), delta_less_30 = lambda x: x.delta < pd.Timedelta('30 days'))\ .query('delta_positive and delta_less_30')\ .sort('delta', ascending=False)\ .groupby('rev_id_x', as_index=False).first() sns.jointplot(x = 'pred_aggression_score_x', y = 'pred_aggression_score_y', data = micro_pairs) t_friendly, t_neutral, t_angry = np.percentile(micro_pairs['pred_aggression_score_x'], (5, 50, 95)) sns.distplot(micro_pairs.query('pred_aggression_score_x > %f' % t_angry)['pred_aggression_score_y'], hist=False, label = 'Angry A->B') sns.distplot(micro_pairs.query('pred_aggression_score_x < %f' % t_friendly)['pred_aggression_score_y'], hist=False, label = 'Friendly A->B') plt.xlabel('Aggression B->A') """ Explanation: Methodology 2: is the aggression score of what A says on B's page related to the score of the next thing B says on A's page? 
End of explanation """ out_score = d['2015'].query('not own_page and not author_anon and not recipient_anon')\ .groupby(['user_text'], as_index = False)['pred_aggression_score']\ .agg({'out_score': np.mean, 'count': len})\ .query('count > 5') in_score = d['2015'].query('not own_page and not author_anon and not recipient_anon')\ .groupby(['page_title'], as_index = False)['pred_aggression_score']\ .agg({'in_score': np.mean, 'count': len})\ .query('count > 5')\ .rename(columns = {'page_title':'user_text'}) in_out = out_score.merge(in_score, how = 'inner', on = 'user_text') in_out['saintliness'] = in_out['out_score'] - in_out['in_score'] sns.jointplot(x = 'in_score', y = 'out_score', data = in_out) sns.distplot(in_out['saintliness'].dropna(), kde =False, norm_hist = True) # Saints in_out.sort_values('saintliness').head(5) # Saints in_out.sort_values('saintliness').query('in_score > 0 and out_score < 0' ).head(5) #d['2015'].query("user_text == 'Parenchyma18'") # Saints in_out.sort_values('saintliness').query('in_score > 0 and out_score < 0' ).head(5) # Provocateurs in_out.sort_values('saintliness', ascending = False).head(5) # Provocateurs in_out.sort_values('saintliness', ascending = False).query('out_score > 0 and in_score < 0').head(5) """ Explanation: Q: Saintliness vs. Provocativeness End of explanation """
tpin3694/tpin3694.github.io
python/pandas_dataframe_count_values.ipynb
mit
import pandas as pd """ Explanation: Title: Count Values In Pandas Dataframe Slug: pandas_dataframe_count_values Summary: Count Values In Pandas Dataframe Date: 2016-05-01 12:00 Category: Python Tags: Data Wrangling Authors: Chris Albon Import the pandas module End of explanation """ year = pd.Series([1875, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894]) guardCorps = pd.Series([0,2,2,1,0,0,1,1,0,3,0,2,1,0,0,1,0,1,0,1]) corps1 = pd.Series([0,0,0,2,0,3,0,2,0,0,0,1,1,1,0,2,0,3,1,0]) corps2 = pd.Series([0,0,0,2,0,2,0,0,1,1,0,0,2,1,1,0,0,2,0,0]) corps3 = pd.Series([0,0,0,1,1,1,2,0,2,0,0,0,1,0,1,2,1,0,0,0]) corps4 = pd.Series([0,1,0,1,1,1,1,0,0,0,0,1,0,0,0,0,1,1,0,0]) corps5 = pd.Series([0,0,0,0,2,1,0,0,1,0,0,1,0,1,1,1,1,1,1,0]) corps6 = pd.Series([0,0,1,0,2,0,0,1,2,0,1,1,3,1,1,1,0,3,0,0]) corps7 = pd.Series([1,0,1,0,0,0,1,0,1,1,0,0,2,0,0,2,1,0,2,0]) corps8 = pd.Series([1,0,0,0,1,0,0,1,0,0,0,0,1,0,0,0,1,1,0,1]) corps9 = pd.Series([0,0,0,0,0,2,1,1,1,0,2,1,1,0,1,2,0,1,0,0]) corps10 = pd.Series([0,0,1,1,0,1,0,2,0,2,0,0,0,0,2,1,3,0,1,1]) corps11 = pd.Series([0,0,0,0,2,4,0,1,3,0,1,1,1,1,2,1,3,1,3,1]) corps14 = pd.Series([ 1,1,2,1,1,3,0,4,0,1,0,3,2,1,0,2,1,1,0,0]) corps15 = pd.Series([0,1,0,0,0,0,0,1,0,1,1,0,0,0,2,2,0,0,0,0]) """ Explanation: Create all the columns of the dataframe as series End of explanation """ variables = dict(guardCorps = guardCorps, corps1 = corps1, corps2 = corps2, corps3 = corps3, corps4 = corps4, corps5 = corps5, corps6 = corps6, corps7 = corps7, corps8 = corps8, corps9 = corps9, corps10 = corps10, corps11 = corps11 , corps14 = corps14, corps15 = corps15) """ Explanation: Create a dictionary variable that assigns variable names End of explanation """ horsekick = pd.DataFrame(variables, columns = ['guardCorps', 'corps1', 'corps2', 'corps3', 'corps4', 'corps5', 'corps6', 'corps7', 'corps8', 'corps9', 'corps10', 'corps11', 'corps14', 'corps15']) """ Explanation: Create a dataframe and set 
the order of the columns using the columns attribute End of explanation """ horsekick.index = [1875, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894] """ Explanation: Set the dataframe's index to be year End of explanation """ horsekick """ Explanation: View the horsekick dataframe End of explanation """ result = horsekick.apply(pd.value_counts).fillna(0); result """ Explanation: Count the number of times each number of deaths occurs in each regiment End of explanation """ pd.value_counts(horsekick['guardCorps'].values, sort=False) """ Explanation: Count the number of times each monthly death total appears in guardCorps End of explanation """ horsekick['guardCorps'].unique() """ Explanation: List all the unique values in guardCorps End of explanation """
sdpython/ensae_teaching_cs
_doc/notebooks/competitions/2017/prepare_data_2017.ipynb
mit
from jyquickhelper import add_notebook_menu add_notebook_menu() """ Explanation: 2A.ml - 2017 - Préparation des données Ce notebook explique comment les données de la compétation 2017 ont été préparées. On récupére d'abord les données depuis le site OpenFoodFacts. End of explanation """ import os os.stat("c:/temp/fr.openfoodfacts.org.products.csv").st_size / 2**30, 'Go' """ Explanation: A quoi ça ressemble End of explanation """ import pyensae %load_ext pyensae %head -n 2 c:/temp/fr.openfoodfacts.org.products.csv import pandas df = pandas.read_csv("c:/temp/fr.openfoodfacts.org.products.csv", sep="\t", encoding="utf-8", nrows=10000, low_memory=False) df.head().T.to_excel("e.xlsx") df[df.additives.notnull() & df.additives.str.contains("E4")].head().T """ Explanation: C'est gros. End of explanation """ import dask import dask.dataframe as dd """ Explanation: Idée de la compétation On veut savoir les additifs ajoutés apparaissent plus fréquemment avec certains produits ou certains compositions. ON cherche donc à prédire la présence d'additifs en fonction de toutes les autres variables. Si un modèle de prédiction fait mieux que le hasard, cela signifie que certaines corrélations existent. J'ai utilisé dask mais si vous de la mémoire, on peut faire avec pandas. 
End of explanation """ ddf = dd.read_csv("c:/temp/fr.openfoodfacts.org.products.csv", sep="\t", encoding="utf-8", low_memory=False, dtype={'allergens': 'object', 'cities_tags': 'object', 'emb_codes': 'object', 'emb_codes_tags': 'object', 'first_packaging_code_geo': 'object', 'generic_name': 'object', 'ingredients_from_palm_oil_tags': 'object', 'labels': 'object', 'labels_fr': 'object', 'labels_tags': 'object', 'manufacturing_places': 'object', 'manufacturing_places_tags': 'object', 'origins': 'object', 'origins_tags': 'object', 'stores': 'object', 'code': 'object','allergens_fr': 'object', 'cities': 'object', 'created_t': 'object', 'last_modified_t': 'object'}) ddf.head() print(type(ddf)) """ Explanation: Le code qui suit est construit après plusieurs essais en fonction des warnings retournés par le module dask. End of explanation """ ddfe = ddf.assign(hasE=ddf.apply(lambda row: isinstance(row.additives, str) and "en:e" in row.additives, axis=1, meta=bool)) ddfe.head() """ Explanation: On ajoute la colonne à prédire, booleénne, qui indique la présence d'additif commençant par 'e:' comme E440. End of explanation """ g100 = [_ for _ in ddf.columns if '100g' in _] g100 ddfe.compute().shape import numpy ddfe100 = ddfe.assign(s100=ddf.apply(lambda row: sum(0 if numpy.isnan(row[g]) else 1 for g in g100), axis=1, meta=float)) ddfe100 = ddfe100[ddfe100.s100 > 0] ddfe100.head() """ Explanation: On se limite au produit pour lesquels on a quelques informations sur le contenu. End of explanation """ ddfe100.to_csv("ddfe100*.csv", sep="\t", encoding="utf-8", index=False) """ Explanation: Bon la suite prend un peu de temps et ça n'est pas hyper efficace. Il faudrait un dask qui n'utilise pas dask mais uniquement les dataframes pour que ça aille plus vite. Café. End of explanation """ dffefiles = [_ for _ in os.listdir(".") if "ddfe" in _] dffefiles """ Explanation: Bon je crois que je vais vraiment développer une truc comme dask juste avec pandas. 
End of explanation """ types = {k:v for k, v in zip(ddfe100.columns, ddfe100.dtypes)} from sklearn.model_selection import train_test_split for i, name in enumerate(dffefiles): print("name", name) df = pandas.read_csv(name, sep="\t", encoding="utf-8", dtype=types) df_train, df_test = train_test_split(df, test_size =0.5) df_test, df_eval = train_test_split(df_test, test_size =0.5) df_train.to_csv("off_train{0}.txt".format(i), sep="\t", index=False, encoding="utf-8") df_test.to_csv("off_test{0}.txt".format(i), sep="\t", index=False, encoding="utf-8") df_eval.to_csv("off_eval{0}.txt".format(i), sep="\t", index=False, encoding="utf-8") """ Explanation: Split... On impose les mêmes types pour chaque data frame. End of explanation """ df[["additives", "hasE"]].head() import re reg = re.compile("[[](.*?)[]]") addi = re.compile("(en[:]e[0-9])") def has_emachine(v): if isinstance(v, (list, pandas.core.series.Series)): rem = [] add = [] for _ in v: if isinstance(_, str): fd = reg.findall(_) for __ in fd: if " en:e" in __ and addi.search(__): add.append(__)#.split("->")[-1].strip()) elif " en:" not in __: continue else: rem.append(__.split("->")[-1].strip()) else: continue return add, list(sorted(set(rem))) elif isinstance(v, float) and numpy.isnan(v): return [], [] elif isinstance(v, str): if "," in v: raise Exception('{0}\n{1}'.format(type(v), v)) return has_emachine([v]) else: # ??? raise Exception('{0}\n{1}'.format(type(v), v)) hasE, clean = has_emachine(df.loc[1,"additives"]) hasE, clean """ Explanation: Ah j'allais oublié, il faut bidouiller la colonne additives pour retirer éviter un memory leak et on recalcule la colonne hasE pour être sûr. 
End of explanation """ off = [_ for _ in os.listdir(".") if "off" in _ and "all" not in _] for cont in ['train', 'test', 'eval']: sub = [_ for _ in off if cont in _] dfs = [] for name in sub: df = pandas.read_csv(name, sep="\t", encoding="utf-8", dtype=types) print("name", name, df.shape) df["hasE"] = df["additives"].apply(lambda x: len(has_emachine(x)[0]) > 0) df["additives"] = df["additives"].apply(lambda x: ";".join(has_emachine(x)[1])) dfs.append(df) df = pandas.concat(dfs, axis=0) print("merged", df.shape) df.to_csv("off_{0}_all.txt".format(cont), sep="\t", index=False, encoding="utf-8") """ Explanation: On recompose le tout. End of explanation """ len(types) df_eval = pandas.read_csv("off_eval_all.txt", sep="\t", dtype=types, encoding="utf-8") df_eval_X = df_eval.drop("hasE", axis=1) df_eval_X.to_csv("off_eval_all_X.txt") df_eval[["hasE"]].to_csv("off_eval_all_Y.txt") """ Explanation: Il y aura probablement un ou deux data leak dans les autres colonnes.. On découpe le jeu d'évaluation. 
End of explanation """ df_train = pandas.read_csv("off_train_all.txt", sep="\t", dtype=types, encoding="utf-8") df_train.shape X = df_train[g100].fillna(0) Y = df_train['hasE'] from sklearn.linear_model import LogisticRegression clf = LogisticRegression() clf.fit(X, Y) pred = clf.predict(X) from sklearn.metrics import confusion_matrix confusion_matrix(Y, pred) df_test = pandas.read_csv("off_test_all.txt", sep="\t", dtype=types, encoding="utf-8") X_test = df_test[g100].fillna(0) Y_test = df_test['hasE'] pred = clf.predict(X_test) confusion_matrix(Y_test, pred) """ Explanation: Premier modèle End of explanation """ y_proba = clf.predict_proba(X_test) y_pred = clf.predict(X_test) print(y_proba[:3]) print(y_pred[:3]) y_test = Y_test.values type(y_pred), type(Y_test), type(y_test) import numpy prob_pred = numpy.array([(y_proba[i, 1] if c else y_proba[i, 0]) for i, c in enumerate(y_pred)]) prob_pred[:3] from sklearn.metrics import roc_curve fpr, tpr, th = roc_curve(y_pred == y_test, prob_pred) %matplotlib inline import matplotlib.pyplot as plt plt.figure() lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='Courbe ROC') plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel("Proportion mal classée") plt.ylabel("Proportion bien classée") plt.title('ROC') plt.legend(loc="lower right") """ Explanation: ROC End of explanation """
ML4DS/ML4all
C3.Classification_LogReg/RegresionLogistica_student.ipynb
mit
# To visualize plots in the notebook %matplotlib inline # Imported libraries import csv import random import matplotlib import matplotlib.pyplot as plt import pylab import numpy as np from mpl_toolkits.mplot3d import Axes3D from sklearn.preprocessing import PolynomialFeatures from sklearn import linear_model """ Explanation: Logistic Regression Notebook version: 2.0 (Nov 21, 2017) 2.1 (Oct 19, 2018) 2.2 (Oct 09, 2019) 2.3 (Oct 27, 2020) Author: Jesús Cid Sueiro (jcid@tsc.uc3m.es) Jerónimo Arenas García (jarenas@tsc.uc3m.es) Changes: v.1.0 - First version v.1.1 - Typo correction. Prepared for slide presentation v.2.0 - Prepared for Python 3.0 (backcompmatible with 2.7) Assumptions for regression model modified v.2.1 - Minor changes regarding notation and assumptions v.2.2 - Updated notation v.2.3 - Improved slides format. Backward compatibility removed End of explanation """ # Define the logistic function def logistic(t): #<SOL> #</SOL> # Plot the logistic function t = np.arange(-6, 6, 0.1) z = logistic(t) plt.plot(t, z) plt.xlabel('$t$', fontsize=14) plt.ylabel('$g(t)$', fontsize=14) plt.title('The logistic function') plt.grid() """ Explanation: 1. Introduction 1.1. Binary classification The goal of a classification problem is to assign a class or category to every instance or observation of a data collection. Here, we will assume that every instance ${\bf x}$ is an $N$-dimensional vector in $\mathbb{R}^N$, and the class $y$ of sample ${\bf x}$ is an element of a binary set ${\mathcal Y} = {0, 1}$. The goal of a classifier is to predict the true value of $y$ after observing ${\bf x}$. We will denote as $\hat{y}$ the classifier output or decision. If $y=\hat{y}$, the decision is a hit, otherwise $y\neq \hat{y}$ and the decision is an error. 1.2. Decision theory: the MAP criterion Decision theory provides a solution to the classification problem in situations where the relation between instance ${\bf x}$ and its class $y$ is given by a known probabilistic model. 
Assume that every tuple $({\bf x}, y)$ is an outcome of a random vector $({\bf X}, Y)$ with joint distribution $p_{{\bf X},Y}({\bf x}, y)$. A natural criteria for classification is to select predictor $\hat{Y}=f({\bf x})$ in such a way that the probability or error, $P{\hat{Y} \neq Y}$ is minimum. Noting that $$ P{\hat{Y} \neq Y} = \int P{\hat{Y} \neq Y | {\bf x}} p_{\bf X}({\bf x}) d{\bf x} $$ the optimal decision maker should take, for every sample ${\bf x}$, the decision minimizing the conditional error probability: \begin{align} \hat{y}^* &= \arg\min_{\hat{y}} P{Y \neq \hat{y} |{\bf x}} \ &= \arg\max_{\hat{y}} P{Y = \hat{y} |{\bf x}} \ \end{align} Thus, the optimal decision rule can be expressed as $$ P_{Y|{\bf X}}(1|{\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}{\hat{y}=0}\quad P{Y|{\bf X}}(0|{\bf x}) $$ or, equivalently $$ P_{Y|{\bf X}}(1|{\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0}\quad \frac{1}{2} $$ The classifier implementing this decision rule is usually referred to as the MAP (Maximum A Posteriori) classifier. As we have seen, the MAP classifier minimizes the error probability for binary classification, but the result can also be generalized to multiclass classification problems. 1.3. Learning Classical decision theory is grounded on the assumption that the probabilistic model relating the observed sample ${\bf X}$ and the true hypothesis $Y$ is known. Unfortunately, this is unrealistic in many applications, where the only available information to construct the classifier is a dataset $\mathcal D = {{\bf x}k, y_k}{k=0}^{K-1}$ of instances and their respective class labels. A more realistic formulation of the classification problem is the following: given a dataset $\mathcal D = {({\bf x}k, y_k) \in {\mathbb{R}}^N \times {\mathcal Y}, \, k=0,\ldots,{K-1}}$ of independent and identically distributed (i.i.d.) 
samples from an unknown distribution $p{{\bf X},Y}({\bf x}, y)$, predict the class $y$ of a new sample ${\bf x}$ with the minimum probability of error. 1.4. Parametric classifiers Since the probabilistic model generating the data is unknown, the MAP decision rule cannot be applied. However, we can use the dataset to estimate the a posterior class probability model, and apply it to approximate the MAP decision maker. Parametric classifiers based on this idea assume, additionally, that the posterior class probabilty satisfies some parametric formula: $$ P_{Y|X}(1|{\bf x},{\bf w}) = f_{\bf w}({\bf x}) $$ where ${\bf w}$ is a vector of parameters. Given the expression of the MAP decision maker, classification consists in comparing the value of $f_{\bf w}({\bf x})$ with the threshold $\frac{1}{2}$, and each parameter vector would be associated to a different decision maker. <img src="./figs/parametric_decision.png" width=400> In practice, the dataset ${\mathcal D}$ is used to select a particular parameter vector $\hat{\bf w}$ according to certain criterion. Accordingly, the decision rule becomes $$ f_{\hat{\bf w}}({\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0}\quad \frac{1}{2} $$ In this notebook, we explore one of the most popular model-based parametric classification methods: logistic regression. 2. Logistic regression. 2.1. The logistic function The logistic regression model assumes that the binary class label $Y \in {0,1}$ of observation $X\in \mathbb{R}^N$ satisfies the expression. $$P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g({\bf w}^\intercal{\bf x})$$ $$P_{Y|{\bf,X}}(0|{\bf x}, {\bf w}) = 1-g({\bf w}^\intercal{\bf x})$$ where ${\bf w}$ is a parameter vector and $g(·)$ is the logistic function, which is defined by $$g(t) = \frac{1}{1+\exp(-t)}$$ The code below defines and plots the logistic function: End of explanation """ # Weight vector: w = [4, 8] # Try different weights # Create a rectangular grid. 
x_min = -1 x_max = 1 h = (x_max - x_min) / 200 xgrid = np.arange(x_min, x_max, h) xx0, xx1 = np.meshgrid(xgrid, xgrid) # Compute the logistic map for the given weights, and plot Z = logistic(w[0]*xx0 + w[1]*xx1) fig = plt.figure() ax = fig.gca(projection='3d') ax.plot_surface(xx0, xx1, Z, cmap=plt.cm.copper) ax.contour(xx0, xx1, Z, levels=[0.5], colors='b', linewidths=(3,)) plt.xlabel('$x_0$') plt.ylabel('$x_1$') ax.set_zlabel('P(1|x,w)') plt.show() """ Explanation: It is straightforward to see that the logistic function has the following properties: P1: Probabilistic output: $\quad 0 \le g(t) \le 1$ P2: Symmetry: $\quad g(-t) = 1-g(t)$ P3: Monotonicity: $\quad g'(t) = g(t)\cdot [1-g(t)] \ge 0$ Exercise 1: Verify properties P2 and P3. Exercise 2: Implement a function to compute the logistic function, and use it to plot such function in the inverval $[-6,6]$. 2.2. Classifiers based on the logistic model. The MAP classifier under a logistic model will have the form $$P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g({\bf w}^\intercal{\bf x}) \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0} \quad \frac{1}{2} $$ Therefore $$ 2 \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0} \quad 1 + \exp(-{\bf w}^\intercal{\bf x}) $$ which is equivalent to $${\bf w}^\intercal{\bf x} \quad\mathop{\gtrless}^{\hat{y}=1}_{\hat{y}=0}\quad 0 $$ Thus, the classifiers based on the logistic model are given by linear decision boundaries passing through the origin, ${\bf x} = {\bf 0}$. End of explanation """ CS = plt.contourf(xx0, xx1, Z) CS2 = plt.contour(CS, levels=[0.5], colors='m', linewidths=(3,)) plt.xlabel('$x_0$') plt.ylabel('$x_1$') plt.colorbar(CS, ticks=[0, 0.5, 1]) plt.show() """ Explanation: The next code fragment represents the output of the same classifier, representing the output of the logistic function in the $x_0$-$x_1$ plane, encoding the value of the logistic function in the color map. 
End of explanation """ # Weight vector: w = [1, 10, 10, -20, 5, 1] # Try different weights # Create a regtangular grid. x_min = -1 x_max = 1 h = (x_max - x_min) / 200 xgrid = np.arange(x_min, x_max, h) xx0, xx1 = np.meshgrid(xgrid, xgrid) # Compute the logistic map for the given weights # Z = <FILL IN> # Plot the logistic map fig = plt.figure() ax = fig.gca(projection='3d') ax.plot_surface(xx0, xx1, Z, cmap=plt.cm.copper) plt.xlabel('$x_0$') plt.ylabel('$x_1$') ax.set_zlabel('P(1|x,w)') plt.show() CS = plt.contourf(xx0, xx1, Z) CS2 = plt.contour(CS, levels=[0.5], colors='m', linewidths=(3,)) plt.xlabel('$x_0$') plt.ylabel('$x_1$') plt.colorbar(CS, ticks=[0, 0.5, 1]) plt.show() """ Explanation: 3.3. Nonlinear classifiers. The logistic model can be extended to construct non-linear classifiers by using non-linear data transformations. A general form for a nonlinear logistic regression model is $$P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g[{\bf w}^\intercal{\bf z}({\bf x})] $$ where ${\bf z}({\bf x})$ is an arbitrary nonlinear transformation of the original variables. 
The boundary decision in that case is given by equation $$ {\bf w}^\intercal{\bf z} = 0 $$ Exercise 3: Modify the code above to generate a 3D surface plot of the polynomial logistic regression model given by $$ P_{Y|{\bf X}}(1|{\bf x}, {\bf w}) = g(1 + 10 x_0 + 10 x_1 - 20 x_0^2 + 5 x_0 x_1 + x_1^2) $$ End of explanation """ # Adapted from a notebook by Jason Brownlee def loadDataset(filename, split): xTrain, cTrain, xTest, cTest = [], [], [], [] with open(filename, 'r') as csvfile: lines = csv.reader(csvfile) dataset = list(lines) for i in range(len(dataset)-1): for y in range(4): dataset[i][y] = float(dataset[i][y]) item = dataset[i] if random.random() < split: xTrain.append(item[0:4]) cTrain.append(item[4]) else: xTest.append(item[0:4]) cTest.append(item[4]) return xTrain, cTrain, xTest, cTest xTrain_all, cTrain_all, xTest_all, cTest_all = loadDataset('iris.data', 0.66) nTrain_all = len(xTrain_all) nTest_all = len(xTest_all) print('Train:', nTrain_all) print('Test:', nTest_all) """ Explanation: 3. Inference Remember that the idea of parametric classification is to use the training data set $\mathcal D = {({\bf x}_k, y_k) \in {\mathbb{R}}^N \times {0,1}, k=0,\ldots,{K-1}}$ to estimate ${\bf w}$. 
The estimate, $\hat{\bf w}$, can be used to compute the label prediction for any new observation as $$\hat{y} = \arg\max_y P_{Y|{\bf X}}(y|{\bf x},\hat{\bf w}).$$ <img src="figs/parametric_decision.png" width=400> In this notebook, we will discuss two different approaches to the estimation of ${\bf w}$: Maximum Likelihood (ML): $\hat{\bf w}{\text{ML}} = \arg\max{\bf w} P_{{\mathcal D}|{\bf W}}({\mathcal D}|{\bf w})$ Maximum *A Posteriori (MAP): $\hat{\bf w}{\text{MAP}} = \arg\max{\bf w} p_{{\bf W}|{\mathcal D}}({\bf w}|{\mathcal D})$ For the mathematical derivation of the logistic regression algorithm, the following representation of the logistic model will be useful: using the symmetry property of the logistic function, we can write $$P_{Y|{\bf X}}(0|{\bf x}, {\bf w}) = 1-g\left({\bf w}^\intercal{\bf z}({\bf x})\right) = g\left(-{\bf w}^\intercal{\bf z}({\bf x})\right)$$ thus $$P_{Y|{\bf X}}(y|{\bf x}, {\bf w}) = g\left(\overline{y}{\bf w}^\intercal{\bf z}({\bf x})\right)$$ where $\overline{y} = 2y-1$ is a symmetrized label ($\overline{y}\in{-1, 1}$). 3.1. Model assumptions In the following, we will make the following assumptions: A1. (Logistic Regression): We assume a logistic model for the a posteriori probability of ${Y}$ given ${\bf X}$, i.e., $$P_{Y|{\bf X}}(y|{\bf x}, {\bf w}) = g\left({\bar y}\cdot {\bf w}^\intercal{\bf z}({\bf x})\right).$$ A2. All samples in ${\mathcal D}$ have been generated from the same distribution, $p_{{\bf X}, Y| {\bf W}}({\bf x}, y| {\bf w})$. A3. Input variables $\bf x$ do not depend on $\bf w$. This implies that $p({\bf x}|{\bf w}) = p({\bf x})$ A4. Targets $y_0, \cdots, y_{K-1}$ are statistically independent given $\bf w$ and the inputs ${\bf x}0, \cdots, {\bf x}{K-1}$, that is: $$P(y_0, \cdots, y_{K-1} | {\bf x}0, \cdots, {\bf x}{K-1}, {\bf w}) = \prod_{k=0}^{K-1} P(y_k | {\bf x}_k, {\bf w})$$ 3.2. ML estimation. 
The ML estimate is defined as $$\hat{\bf w}{\text{ML}} = \arg\max{\bf w} P_{{\mathcal D}|{\bf W}}({\mathcal D}|{\bf w})$$ Ussing assumptions A2 and A3 above, we have that \begin{align} P_{{\mathcal D}|{\bf W}}({\mathcal D}|{\bf w}) & = p(y_0, \cdots, y_{K-1},{\bf x}0, \cdots, {\bf x}{K-1}| {\bf w}) \ & = P(y_0, \cdots, y_{K-1}|{\bf x}0, \cdots, {\bf x}{K-1}, {\bf w}) \; p({\bf x}0, \cdots, {\bf x}{K-1}| {\bf w}) \ & = P(y_0, \cdots, y_{K-1}|{\bf x}0, \cdots, {\bf x}{K-1}, {\bf w}) \; p({\bf x}0, \cdots, {\bf x}{K-1})\end{align} Finally, using assumption A4, we can formulate the ML estimation of $\bf w$ as the resolution of the following optimization problem \begin{align} \hat {\bf w}\text{ML} & = \arg \max{\bf w} P(y_0, \cdots, y_{K-1}|{\bf x}0, \cdots, {\bf x}{K-1}, {\bf w}) \ & = \arg \max_{\bf w} \prod_{k=0}^{K-1} P(y_k|{\bf x}k, {\bf w}) \ & = \arg \max{\bf w} \sum_{k=0}^{K-1} \log P(y_k|{\bf x}k, {\bf w}) \ & = \arg \min{\bf w} \sum_{k=0}^{K-1} - \log P(y_k|{\bf x}_k, {\bf w}) \end{align} where the arguments of the maximization or minimization problems of the last three lines are usually referred to as the likelihood, log-likelihood $\left[L(\bf w)\right]$, and negative log-likelihood $\left[\text{NLL}(\bf w)\right]$, respectively. Now, using A1 (the logistic model) \begin{align} \text{NLL}({\bf w}) &= - \sum_{k=0}^{K-1}\log\left[g\left(\overline{y}k{\bf w}^\intercal {\bf z}_k\right)\right] \ &= \sum{k=0}^{K-1}\log\left[1+\exp\left(-\overline{y}_k{\bf w}^\intercal {\bf z}_k\right)\right] \end{align} where ${\bf z}_k={\bf z}({\bf x}_k)$. It can be shown that $\text{NLL}({\bf w})$ is a convex and differentiable function of ${\bf w}$. Therefore, its minimum is a point with zero gradient. 
\begin{align} \nabla_{\bf w} \text{NLL}(\hat{\bf w}{\text{ML}}) &= - \sum{k=0}^{K-1} \frac{\exp\left(-\overline{y}k\hat{\bf w}{\text{ML}}^\intercal {\bf z}k\right) \overline{y}_k {\bf z}_k} {1+\exp\left(-\overline{y}_k\hat{\bf w}{\text{ML}}^\intercal {\bf z}k \right)} = \ &= - \sum{k=0}^{K-1} \left[y_k-g(\hat{\bf w}_{\text{ML}}^T {\bf z}_k)\right] {\bf z}_k = 0 \end{align} Unfortunately, $\hat{\bf w}_{\text{ML}}$ cannot be taken out from the above equation, and some iterative optimization algorithm must be used to search for the minimum. 3.3. Gradient descent. A simple iterative optimization algorithm is <a href = https://en.wikipedia.org/wiki/Gradient_descent> gradient descent</a>. \begin{align} {\bf w}{n+1} = {\bf w}_n - \rho_n \nabla{\bf w} \text{NLL}({\bf w}_n) \end{align} where $\rho_n >0$ is the learning step. Applying the gradient descent rule to logistic regression, we get the following algorithm: \begin{align} {\bf w}{n+1} &= {\bf w}_n + \rho_n \sum{k=0}^{K-1} \left[y_k-g({\bf w}_n^\intercal {\bf z}_k)\right] {\bf z}_k \end{align} Gradient descent in matrix form Defining vectors \begin{align} {\bf y} &= [y_0,\ldots,y_{K-1}]^\top \ \hat{\bf p}n &= [g({\bf w}_n^\top {\bf z}_0), \ldots, g({\bf w}_n^\top {\bf z}{K-1})]^\top \end{align} and matrix \begin{align} {\bf Z} = \left[{\bf z}0,\ldots,{\bf z}{K-1}\right]^\top \end{align} we can write \begin{align} {\bf w}_{n+1} &= {\bf w}_n + \rho_n {\bf Z}^\top \left({\bf y}-\hat{\bf p}_n\right) \end{align} In the following, we will explore the behavior of the gradient descend method using the Iris Dataset. 
End of explanation """ # Select attributes i = 0 # Try 0,1,2,3 j = 1 # Try 0,1,2,3 with j!=i # Select two classes c0 = 'Iris-versicolor' c1 = 'Iris-virginica' # Select two coordinates ind = [i, j] # Take training test X_tr = np.array([[xTrain_all[n][i] for i in ind] for n in range(nTrain_all) if cTrain_all[n]==c0 or cTrain_all[n]==c1]) C_tr = [cTrain_all[n] for n in range(nTrain_all) if cTrain_all[n]==c0 or cTrain_all[n]==c1] Y_tr = np.array([int(c==c1) for c in C_tr]) n_tr = len(X_tr) # Take test set X_tst = np.array([[xTest_all[n][i] for i in ind] for n in range(nTest_all) if cTest_all[n]==c0 or cTest_all[n]==c1]) C_tst = [cTest_all[n] for n in range(nTest_all) if cTest_all[n]==c0 or cTest_all[n]==c1] Y_tst = np.array([int(c==c1) for c in C_tst]) n_tst = len(X_tst) """ Explanation: Now, we select two classes and two attributes. End of explanation """ def normalize(X, mx=None, sx=None): # Compute means and standard deviations if mx is None: mx = np.mean(X, axis=0) if sx is None: sx = np.std(X, axis=0) # Normalize X0 = (X-mx)/sx return X0, mx, sx """ Explanation: 3.2.2. Data normalization Normalization of data is a common pre-processing step in many machine learning algorithms. Its goal is to get a dataset where all input coordinates have a similar scale. Learning algorithms usually show less instabilities and convergence problems when data are normalized. We will define a normalization function that returns a training data matrix with zero sample mean and unit sample variance. End of explanation """ # Normalize data Xn_tr, mx, sx = normalize(X_tr) Xn_tst, mx, sx = normalize(X_tst, mx, sx) """ Explanation: Now, we can normalize training and test data. Observe in the code that the same transformation should be applied to training and test data. This is the reason why normalization with the test data is done using the means and the variances computed with the training set. 
End of explanation """ # Separate components of x into different arrays (just for the plots) x0c0 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==0] x1c0 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==0] x0c1 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==1] x1c1 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==1] # Scatterplot. labels = {'Iris-setosa': 'Setosa', 'Iris-versicolor': 'Versicolor', 'Iris-virginica': 'Virginica'} plt.plot(x0c0, x1c0,'r.', label=labels[c0]) plt.plot(x0c1, x1c1,'g+', label=labels[c1]) plt.xlabel('$x_' + str(ind[0]) + '$') plt.ylabel('$x_' + str(ind[1]) + '$') plt.legend(loc='best') plt.axis('equal') plt.show() """ Explanation: The following figure generates a plot of the normalized training data. End of explanation """ def logregFit(Z_tr, Y_tr, rho, n_it): # Data dimension n_dim = Z_tr.shape[1] # Initialize variables nll_tr = np.zeros(n_it) pe_tr = np.zeros(n_it) Y_tr2 = 2*Y_tr - 1 # Transform labels into binary symmetric. w = np.random.randn(n_dim,1) # Running the gradient descent algorithm for n in range(n_it): # Compute posterior probabilities for weight w p1_tr = logistic(np.dot(Z_tr, w)) # Compute negative log-likelihood # (note that this is not required for the weight update, only for nll tracking) nll_tr[n] = np.sum(np.log(1 + np.exp(-np.dot(Y_tr2*Z_tr, w)))) # Update weights w += rho*np.dot(Z_tr.T, Y_tr - p1_tr) return w, nll_tr def logregPredict(Z, w): # Compute posterior probability of class 1 for weights w. p = logistic(np.dot(Z, w)).flatten() # Class D = [int(round(pn)) for pn in p] return p, D """ Explanation: In order to apply the gradient descent rule, we need to define two methods: - A fit method, that receives the training data and returns the model weights and the value of the negative log-likelihood during all iterations. - A predict method, that receives the model weight and a set of inputs, and returns the posterior class probabilities for that input, as well as their corresponding class predictions. 
End of explanation """ # Parameters of the algorithms rho = float(1)/50 # Learning step n_it = 200 # Number of iterations # Compute Z's Z_tr = np.c_[np.ones(n_tr), Xn_tr] Z_tst = np.c_[np.ones(n_tst), Xn_tst] n_dim = Z_tr.shape[1] # Convert target arrays to column vectors Y_tr2 = Y_tr[np.newaxis].T Y_tst2 = Y_tst[np.newaxis].T # Running the gradient descent algorithm w, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it) # Classify training and test data p_tr, D_tr = logregPredict(Z_tr, w) p_tst, D_tst = logregPredict(Z_tst, w) # Compute error rates E_tr = D_tr!=Y_tr E_tst = D_tst!=Y_tst # Error rates pe_tr = float(sum(E_tr)) / n_tr pe_tst = float(sum(E_tst)) / n_tst # NLL plot. plt.plot(range(n_it), nll_tr,'b.:', label='Train') plt.xlabel('Iteration') plt.ylabel('Negative Log-Likelihood') plt.legend() print(f'The optimal weights are: {w}') print('The final error rates are:') print(f'- Training: {pe_tr}') print(f'- Test: {pe_tst}') print(f'The NLL after training is {nll_tr[len(nll_tr)-1]}') """ Explanation: We can test the behavior of the gradient descent method by fitting a logistic regression model with ${\bf z}({\bf x}) = (1, {\bf x}^\top)^\top$. End of explanation """ # Create a regtangular grid. x_min, x_max = Xn_tr[:, 0].min(), Xn_tr[:, 0].max() y_min, y_max = Xn_tr[:, 1].min(), Xn_tr[:, 1].max() dx = x_max - x_min dy = y_max - y_min h = dy /400 xx, yy = np.meshgrid(np.arange(x_min - 0.1 * dx, x_max + 0.1 * dx, h), np.arange(y_min - 0.1 * dx, y_max + 0.1 * dy, h)) X_grid = np.array([xx.ravel(), yy.ravel()]).T # Compute Z's Z_grid = np.c_[np.ones(X_grid.shape[0]), X_grid] # Compute the classifier output for all samples in the grid. 
pp, dd = logregPredict(Z_grid, w) # Paint output maps pylab.rcParams['figure.figsize'] = 6, 6 # Set figure size # Color plot plt.plot(x0c0, x1c0,'r.', label=labels[c0]) plt.plot(x0c1, x1c1,'g+', label=labels[c1]) plt.xlabel('$x_' + str(ind[0]) + '$') plt.ylabel('$x_' + str(ind[1]) + '$') plt.legend(loc='best') plt.axis('equal') pp = pp.reshape(xx.shape) CS = plt.contourf(xx, yy, pp, cmap=plt.cm.copper) plt.contour(xx, yy, pp, levels=[0.5], colors='b', linewidths=(3,)) plt.colorbar(CS, ticks=[0, 0.5, 1]) plt.show() """ Explanation: 3.2.3. Free parameters Under certain conditions, the gradient descent method can be shown to converge asymptotically (i.e. as the number of iterations goes to infinity) to the ML estimate of the logistic model. However, in practice, the final estimate of the weights ${\bf w}$ depend on several factors: Number of iterations Initialization Learning step Exercise 4: Visualize the variability of gradient descent caused by initializations. To do so, fix the number of iterations to 200 and the learning step, and execute the gradient descent 100 times, storing the training error rate of each execution. Plot the histogram of the error rate values. Note that you can do this exercise with a loop over the 100 executions, including the code in the previous code slide inside the loop, with some proper modifications. To plot a histogram of the values in array p with nbins, you can use plt.hist(p, n) 3.2.3.1. Learning step The learning step, $\rho$, is a free parameter of the algorithm. Its choice is critical for the convergence of the algorithm. Too large values of $\rho$ make the algorithm diverge. For too small values, the convergence gets very slow and more iterations are required for a good convergence. Exercise 5: Observe the evolution of the negative log-likelihood with the number of iterations for different values of $\rho$. It is easy to check that, for large enough $\rho$, the gradient descent method does not converge. 
Can you estimate (through manual observation) an approximate value of $\rho$ stating a boundary between convergence and divergence? Exercise 6: In this exercise we explore the influence of the learning step more sistematically. Use the code in the previouse exercises to compute, for every value of $\rho$, the average error rate over 100 executions. Plot the average error rate vs. $\rho$. Note that you should explore the values of $\rho$ in a logarithmic scale. For instance, you can take $\rho = 1, \frac{1}{10}, \frac{1}{100}, \frac{1}{1000}, \ldots$ In practice, the selection of $\rho$ may be a matter of trial an error. Also there is some theoretical evidence that the learning step should decrease along time up to cero, and the sequence $\rho_n$ should satisfy two conditions: - C1: $\sum_{n=0}^{\infty} \rho_n^2 < \infty$ (decrease slowly) - C2: $\sum_{n=0}^{\infty} \rho_n = \infty$ (but not too slowly) For instance, we can take $\rho_n= \frac{1}{n}$. Another common choice is $\rho_n = \frac{\alpha}{1+\beta n}$ where $\alpha$ and $\beta$ are also free parameters that can be selected by trial and error with some heuristic method. 3.2.4. Visualizing the posterior map. We can also visualize the posterior probability map estimated by the logistic regression model for the estimated weights. End of explanation """ # Parameters of the algorithms rho = float(1)/50 # Learning step n_it = 500 # Number of iterations g = 5 # Degree of polynomial # Compute Z_tr poly = PolynomialFeatures(degree=g) Z_tr = poly.fit_transform(Xn_tr) # Normalize columns (this is useful to make algorithms more stable).) 
Zn, mz, sz = normalize(Z_tr[:,1:]) Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1) # Compute Z_tst Z_tst = poly.fit_transform(Xn_tst) Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz) Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1) # Convert target arrays to column vectors Y_tr2 = Y_tr[np.newaxis].T Y_tst2 = Y_tst[np.newaxis].T # Running the gradient descent algorithm w, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it) # Classify training and test data p_tr, D_tr = logregPredict(Z_tr, w) p_tst, D_tst = logregPredict(Z_tst, w) # Compute error rates E_tr = D_tr!=Y_tr E_tst = D_tst!=Y_tst # Error rates pe_tr = float(sum(E_tr)) / n_tr pe_tst = float(sum(E_tst)) / n_tst # NLL plot. plt.plot(range(n_it), nll_tr,'b.:', label='Train') plt.xlabel('Iteration') plt.ylabel('Negative Log-Likelihood') plt.legend() print(f'The optimal weights are: {w.T}') print('The final error rates are:') print(f'- Training: {pe_tr} \n- Test: {pe_tst}') print('The NLL after training is', nll_tr[len(nll_tr)-1]) """ Explanation: 3.2.5. Polynomial Logistic Regression The error rates of the logistic regression model can be potentially reduced by using polynomial transformations. To compute the polynomial transformation up to a given degree, we can use the PolynomialFeatures method in sklearn.preprocessing. End of explanation """ # Compute Z_grid Z_grid = poly.fit_transform(X_grid) Zn, mz, sz = normalize(Z_grid[:,1:], mz, sz) Z_grid = np.concatenate((np.ones((Z_grid.shape[0],1)), Zn), axis=1) # Compute the classifier output for all samples in the grid. 
pp, dd = logregPredict(Z_grid, w)
pp = pp.reshape(xx.shape)

# Paint output maps
pylab.rcParams['figure.figsize'] = 6, 6  # Set figure size
plt.plot(x0c0, x1c0, 'r.', label=labels[c0])
plt.plot(x0c1, x1c1, 'g+', label=labels[c1])
plt.xlabel('$x_' + str(ind[0]) + '$')
plt.ylabel('$x_' + str(ind[1]) + '$')
plt.axis('equal')
plt.legend(loc='best')
CS = plt.contourf(xx, yy, pp, cmap=plt.cm.copper)
plt.contour(xx, yy, pp, levels=[0.5], colors='b', linewidths=(3,))
plt.colorbar(CS, ticks=[0, 0.5, 1])
plt.show()
"""
Explanation: Visualizing the posterior map we can see that the polynomial transformation produces nonlinear decision boundaries.
End of explanation
"""
def logregFit2(Z_tr, Y_tr, rho, n_it, C=1e4):
    """Fit an L2-regularized (MAP with Gaussian prior) logistic regression
    model using Newton's method.

    Z_tr : (K, n_dim) matrix of extended (transformed) input samples.
    Y_tr : (K, 1) column vector of binary labels.
    rho  : learning step.
    n_it : number of Newton iterations.
    C    : inverse regularization strength (larger C = weaker regularization).

    Returns (w, nll_tr): the (n_dim, 1) weight vector and the per-iteration
    negative log-likelihood values.
    """
    r = 2.0/C
    n_dim = Z_tr.shape[1]

    # Initialize variables
    nll_tr = np.zeros(n_it)
    w = np.random.randn(n_dim, 1)

    # Running Newton's method
    for n in range(n_it):
        p_tr = logistic(np.dot(Z_tr, w))
        # Diagonal of S(w): p_k * (1 - p_k), kept as a flat vector. Building
        # the full KxK diagonal matrix with np.diag costs O(K^2) memory for
        # no benefit.
        sk = np.ravel(p_tr * (1 - p_tr))

        # Compute negative log-likelihood of the current weights
        nll_tr[n] = float(- np.dot(Y_tr.T, np.log(p_tr))
                          - np.dot((1 - Y_tr).T, np.log(1 - p_tr)))

        # Update weights. Z_tr.T * sk broadcasts sk over columns, i.e. it
        # computes Z' S Z without materializing S. Solving the linear system
        # H x = Z'(y - p) replaces the explicit matrix inverse (cheaper and
        # numerically more stable).
        H = r*np.identity(n_dim) + np.dot(Z_tr.T * sk, Z_tr)
        w += rho*np.linalg.solve(H, np.dot(Z_tr.T, Y_tr - p_tr))

    return w, nll_tr

# Parameters of the algorithms
rho = float(1)/50   # Learning step
n_it = 500          # Number of iterations
C = 1000            # Inverse regularization strength
g = 4               # Degree of the polynomial transformation

# Compute Z_tr
poly = PolynomialFeatures(degree=g)
Z_tr = poly.fit_transform(X_tr)

# Normalize columns (this is useful to make algorithms more stable).
Zn, mz, sz = normalize(Z_tr[:,1:]) Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1) # Compute Z_tst Z_tst = poly.fit_transform(X_tst) Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz) Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1) # Convert target arrays to column vectors Y_tr2 = Y_tr[np.newaxis].T Y_tst2 = Y_tst[np.newaxis].T # Running the gradient descent algorithm w, nll_tr = logregFit2(Z_tr, Y_tr2, rho, n_it, C) # Classify training and test data p_tr, D_tr = logregPredict(Z_tr, w) p_tst, D_tst = logregPredict(Z_tst, w) # Compute error rates E_tr = D_tr!=Y_tr E_tst = D_tst!=Y_tst # Error rates pe_tr = float(sum(E_tr)) / n_tr pe_tst = float(sum(E_tst)) / n_tst # NLL plot. plt.plot(range(n_it), nll_tr,'b.:', label='Train') plt.xlabel('Iteration') plt.ylabel('Negative Log-Likelihood') plt.legend() print('The final error rates are:') print('- Training:', str(pe_tr)) print('- Test:', str(pe_tst)) print('The NLL after training is:', str(nll_tr[len(nll_tr)-1])) """ Explanation: 4. Regularization and MAP estimation. 4.1 MAP estimation An alternative to the ML estimation of the weights in logistic regression is Maximum A Posteriori estimation. Modelling the logistic regression weights as a random variable with prior distribution $p_{\bf W}({\bf w})$, the MAP estimate is defined as $$ \hat{\bf w}{\text{MAP}} = \arg\max{\bf w} p({\bf w}|{\mathcal D}) $$ The posterior density $p({\bf w}|{\mathcal D})$ is related to the likelihood function and the prior density of the weights, $p_{\bf W}({\bf w})$ through the Bayes rule $$ p({\bf w}|{\mathcal D}) = \frac{P\left({\mathcal D}|{\bf w}\right) \; p_{\bf W}({\bf w})} {p\left({\mathcal D}\right)} $$ In general, the denominator in this expression cannot be computed analytically. However, it is not required for MAP estimation because it does not depend on ${\bf w}$. 
Therefore, the MAP solution is given by \begin{align} \hat{\bf w}_{\text{MAP}} & = \arg\max_{\bf w} \left\{ P\left({\mathcal D}|{\bf w}\right) \; p_{\bf W}({\bf w}) \right\} \\ & = \arg\max_{\bf w} \left\{ L({\mathbf w}) + \log p_{\bf W}({\bf w})\right\} \\ & = \arg\min_{\bf w} \left\{ \text{NLL}({\mathbf w}) - \log p_{\bf W}({\bf w})\right\} \end{align} In the light of this expression, we can conclude that the MAP solution is affected by two terms: - The likelihood, which takes large values for parameter vectors $\bf w$ that fit the training data well (smaller $\text{NLL}$ values) - The prior distribution of weights $p_{\bf W}({\bf w})$, which expresses our a priori preference for some solutions. 4.2. Regularization Even though the prior distribution has a natural interpretation as a model of our knowledge about $p({\bf w})$ before observing the data, its choice is frequently motivated by the need to avoid data overfitting. Data overfitting is a frequent problem in ML estimation when the dimension of ${\bf w}$ is much higher than the dimension of the input ${\bf x}$: the ML solution can be too closely adjusted to the training data, while the test error rate is large. In practice we resort to prior distributions that take large values when $\|{\bf w}\|$ is small (associated with smooth classification borders). This helps to improve generalization. In this way, the MAP criterion adds a penalty term to the ML objective that penalizes parameter vectors for which the prior distribution of weights takes small values. In machine learning, the process of introducing penalty terms to avoid overfitting is usually named regularization.
4.3 MAP estimation with Gaussian prior If we assume that ${\bf W}$ follows a zero-mean Gaussian random variable with variance matrix $v{\bf I}$, $$ p_{\bf W}({\bf w}) = \frac{1}{(2\pi v)^{N/2}} \exp\left(-\frac{1}{2v}\|{\bf w}\|^2\right) $$ the MAP estimate becomes \begin{align} \hat{\bf w}_{\text{MAP}} &= \arg\min_{\bf w} \left\{\text{NLL}({\bf w}) + \frac{1}{C}\|{\bf w}\|^2 \right\} \end{align} where $C = 2v$. Note that the regularization term associated with the prior penalizes parameter vectors with large components. Parameter $C$ controls the regularization, and it is named the inverse regularization strength. Noting that $$\nabla_{\bf w}\left\{\text{NLL}({\bf w}) + \frac{1}{C}\|{\bf w}\|^2\right\} = - {\bf Z} \left({\bf y}-\hat{\bf p}_n\right) + \frac{2}{C}{\bf w}, $$ we obtain the following gradient descent rule for MAP estimation \begin{align} {\bf w}_{n+1} &= \left(1-\frac{2\rho_n}{C}\right){\bf w}_n + \rho_n {\bf Z} \left({\bf y}-\hat{\bf p}_n\right) \end{align} Note that the regularization term "pushes" the weights towards zero. 4.4 MAP estimation with Laplacian prior If we assume that ${\bf W}$ follows a multivariate zero-mean Laplacian distribution given by $$ p_{\bf W}({\bf w}) = \frac{1}{(2 C)^{N}} \exp\left(-\frac{1}{C}\|{\bf w}\|_1\right) $$ (where $\|{\bf w}\|_1=|w_1|+\ldots+|w_N|$ is the $L_1$ norm of ${\bf w}$), the MAP estimate becomes \begin{align} \hat{\bf w}_{\text{MAP}} &= \arg\min_{\bf w} \left\{\text{NLL}({\bf w}) + \frac{1}{C}\|{\bf w}\|_1 \right\} \end{align} Parameter $C$ is named the inverse regularization strength. Exercise 7: Derive the gradient descent rules for MAP estimation of the logistic regression weights with Laplacian prior. 5. Other optimization algorithms 5.1. Stochastic Gradient descent. Stochastic gradient descent (SGD) is based on the idea of using a single sample at each iteration of the learning algorithm.
The SGD rule for ML logistic regression is \begin{align} {\bf w}_{n+1} &= {\bf w}_n + \rho_n {\bf z}_n \left(y_n-\hat{p}_n\right) \end{align} Once all samples in the training set have been applied, the algorithm can continue by applying the training set several times. The computational cost of each iteration of SGD is much smaller than that of gradient descent, though it usually needs many more iterations to converge. Exercise 8: Modify logregFit to implement an algorithm that applies the SGD rule. 5.2. Newton's method Assume that the function to be minimized, $C({\bf w})$, can be approximated by its second order Taylor series expansion around ${\bf w}_0$ $$ C({\bf w}) \approx C({\bf w}_0) + \nabla_{\bf w}^\top C({\bf w}_0)({\bf w}-{\bf w}_0) + \frac{1}{2}({\bf w}-{\bf w}_0)^\top{\bf H}({\bf w}_0)({\bf w}-{\bf w}_0) $$ where ${\bf H}({\bf w})$ is the <a href=https://en.wikipedia.org/wiki/Hessian_matrix> Hessian matrix</a> of $C$ at ${\bf w}$. Taking the gradient of $C({\bf w})$, and setting the result to ${\bf 0}$, the minimum of C around ${\bf w}_0$ can be approximated as $$ {\bf w}^* = {\bf w}_0 - {\bf H}({\bf w}_0)^{-1} \nabla_{\bf w}^\top C({\bf w}_0) $$ Since the second order polynomial is only an approximation to $C$, ${\bf w}^*$ is only an approximation to the optimal weight vector, but we can expect ${\bf w}^*$ to be closer to the minimizer of $C$ than ${\bf w}_0$. Thus, we can repeat the process, computing a second order approximation around ${\bf w}^*$ and a new approximation to the minimizer. <a href=https://en.wikipedia.org/wiki/Newton%27s_method_in_optimization> Newton's method</a> is based on this idea. At each optimization step, the function to be minimized is approximated by a second order approximation using a Taylor series expansion around the current estimate. As a result, the learning rule becomes $$\hat{\bf w}_{n+1} = \hat{\bf w}_{n} - \rho_n {\bf H}({\bf w}_n)^{-1} \nabla_{{\bf w}}C({\bf w}_n) $$ 5.2.1. Example: MAP estimation with Gaussian prior.
For instance, for the MAP estimate with Gaussian prior, the Hessian matrix becomes $$ {\bf H}({\bf w}) = \frac{2}{C}{\bf I} + \sum_{k=0}^{K-1} g({\bf w}^\top {\bf z}_k) \left[1-g({\bf w}^\top {\bf z}_k)\right]{\bf z}_k {\bf z}_k^\top $$ Defining diagonal matrix $$ {\mathbf S}({\bf w}) = \text{diag}\left[g({\bf w}^\top {\bf z}_k) \left(1-g({\bf w}^\top {\bf z}_k)\right)\right] $$ the Hessian matrix can be written in more compact form as $$ {\bf H}({\bf w}) = \frac{2}{C}{\bf I} + {\bf Z}^\top {\bf S}({\bf w}) {\bf Z} $$ Therefore, the Newton's algorithm for logistic regression becomes \begin{align} {\bf w}{n+1} = {\bf w}{n} + \rho_n \left(\frac{2}{C}{\bf I} + {\bf Z}^\top {\bf S}({\bf w}_{n}) {\bf Z} \right)^{-1} {\bf Z}^\top \left({\bf y}-\hat{\bf p}_n\right) \end{align} Some variants of the Newton method are implemented in the <a href="http://scikit-learn.org/stable/"> Scikit-learn </a> package. End of explanation """ # Create a logistic regression object. LogReg = linear_model.LogisticRegression(C=1.0) # Compute Z_tr poly = PolynomialFeatures(degree=g) Z_tr = poly.fit_transform(Xn_tr) # Normalize columns (this is useful to make algorithms more stable).) Zn, mz, sz = normalize(Z_tr[:,1:]) Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1) # Compute Z_tst Z_tst = poly.fit_transform(Xn_tst) Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz) Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1) # Fit model to data. 
LogReg.fit(Z_tr, Y_tr)

# Classify training and test data
D_tr = LogReg.predict(Z_tr)
D_tst = LogReg.predict(Z_tst)

# Compute error vectors (True where the prediction is wrong)
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst

# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
print('The final error rates are:')
print('- Training:', str(pe_tr))
print('- Test:', str(pe_tst))

# Compute Z_grid: polynomial transformation of the grid points, normalized
# with the same mz/sz statistics computed from the training data
Z_grid = poly.fit_transform(X_grid)
n_grid = Z_grid.shape[0]
Zn, mz, sz = normalize(Z_grid[:,1:], mz, sz)
Z_grid = np.concatenate((np.ones((n_grid,1)), Zn), axis=1)

# Compute the classifier output for all samples in the grid.
dd = LogReg.predict(Z_grid)
pp = LogReg.predict_proba(Z_grid)[:,1]
pp = pp.reshape(xx.shape)

# Paint output maps
pylab.rcParams['figure.figsize'] = 6, 6  # Set figure size
plt.plot(x0c0, x1c0, 'r.', label=labels[c0])
plt.plot(x0c1, x1c1, 'g+', label=labels[c1])
plt.xlabel('$x_' + str(ind[0]) + '$')
plt.ylabel('$x_' + str(ind[1]) + '$')
plt.axis('equal')
# Fixed: keep the mappable returned by contourf. The colorbar below must be
# built from THIS plot; previously it referenced a stale CS left over from
# an earlier cell.
CS = plt.contourf(xx, yy, pp, cmap=plt.cm.copper)
plt.legend(loc='best')
plt.contour(xx, yy, pp, levels=[0.5], colors='b', linewidths=(3,))
plt.colorbar(CS, ticks=[0, 0.5, 1])
plt.show()
"""
Explanation: 6. Logistic regression in Scikit Learn. The <a href="http://scikit-learn.org/stable/"> scikit-learn </a> package includes an efficient implementation of <a href="http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression"> logistic regression</a>. To use it, we must first create a classifier object, specifying the parameters of the logistic regression algorithm.
End of explanation
"""
GoogleCloudPlatform/tf-estimator-tutorials
Experimental/Movielens Recommendation.ipynb
apache-2.0
!pip install annoy import math import os import pandas as pd import numpy as np from datetime import datetime import tensorflow as tf from tensorflow import data print "TensorFlow : {}".format(tf.__version__) SEED = 19831060 """ Explanation: Recommendation Model with Approximate Item Matching This notebook shows how to train a simple Neural Collaborative Filtering model for recommeding movies to users. We also show how learnt movie embeddings are stored in an appoximate similarity matching index, using Spotify's Annoy library, so that we can quickly find and recommend the most relevant movies to a given customer. We show how this index to search for similar movies. In essense, this tutorial works as follows: 1. Download the movielens dataset. 2. Train a simple Neural Collaborative Model using TensorFlow custom estimator. 3. Extract the learnt movie embeddings. 4. Build an approximate similarity matching index for the movie embeddings. 5. Export the trained model, which receives a user Id, and output the user embedding. The recommendation is served as follows: 1. Receives a user Id 2. Get the user embedding from the exported model 3. Find the similar movie embeddings to the user embedding in the index 4. Return the movie Ids of these embeddings to recommend <a href="https://colab.research.google.com/github/GoogleCloudPlatform/tf-estimator-tutorials/blob/master/Experimental/Movielens%20Recommendation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Setup End of explanation """ DATA_DIR='data' ! wget http://files.grouplens.org/datasets/movielens/ml-latest-small.zip -P data/ ! unzip data/ml-latest-small.zip -d data/ TRAIN_DATA_FILE = os.path.join(DATA_DIR, 'ml-latest-small/ratings.csv') ratings_data = pd.read_csv(TRAIN_DATA_FILE) ratings_data.describe() ratings_data.head() movies_data = pd.read_csv(os.path.join(DATA_DIR, 'ml-latest-small/movies.csv')) movies_data.head() """ Explanation: 1. 
Download Data End of explanation """ HEADER = ['userId', 'movieId', 'rating', 'timestamp'] HEADER_DEFAULTS = [0, 0, 0.0, 0] TARGET_NAME = 'rating' num_users = ratings_data.userId.max() num_movies = movies_data.movieId.max() """ Explanation: 2. Build the TensorFlow Model 2.1 Define Metadata End of explanation """ def make_input_fn(file_pattern, batch_size, num_epochs, mode=tf.estimator.ModeKeys.EVAL): def _input_fn(): dataset = tf.data.experimental.make_csv_dataset( file_pattern=file_pattern, batch_size=batch_size, column_names=HEADER, column_defaults=HEADER_DEFAULTS, label_name=TARGET_NAME, field_delim=',', use_quote_delim=True, header=True, num_epochs=num_epochs, shuffle= (mode==tf.estimator.ModeKeys.TRAIN) ) return dataset return _input_fn """ Explanation: 2.2 Define Data Input Function End of explanation """ def create_feature_columns(embedding_size): feature_columns = [] feature_columns.append( tf.feature_column.embedding_column( tf.feature_column.categorical_column_with_identity( 'userId', num_buckets=num_users + 1), embedding_size ) ) feature_columns.append( tf.feature_column.embedding_column( tf.feature_column.categorical_column_with_identity( 'movieId', num_buckets=num_movies + 1), embedding_size ) ) return feature_columns """ Explanation: 2.3 Create Feature Columns End of explanation """ def model_fn(features, labels, mode, params): feature_columns = create_feature_columns(params.embedding_size) user_layer = tf.feature_column.input_layer( features={'userId': features['userId']}, feature_columns=[feature_columns[0]]) if mode != tf.estimator.ModeKeys.PREDICT: movie_layer = tf.feature_column.input_layer( features={'movieId': features['movieId']}, feature_columns=[feature_columns[1]]) dot_product = tf.keras.layers.Dot(axes=1)([user_layer, movie_layer]) logits = tf.clip_by_value(clip_value_min=0, clip_value_max=5, t=dot_product) predictions = None export_outputs = None loss = None train_op = None if mode == tf.estimator.ModeKeys.PREDICT: predictions = 
{'user_embedding': user_layer} export_outputs = {'predictions': tf.estimator.export.PredictOutput(predictions)} else: loss = tf.losses.mean_squared_error(labels, tf.squeeze(logits)) train_op=tf.train.FtrlOptimizer(params.learning_rate).minimize( loss=loss, global_step=tf.train.get_global_step()) loss = tf.losses.mean_squared_error(labels, tf.squeeze(logits)) return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, export_outputs=export_outputs, loss=loss, train_op=train_op ) """ Explanation: 2.4 Define Model Function End of explanation """ def create_estimator(params, run_config): estimator = tf.estimator.Estimator( model_fn, params=params, config=run_config ) return estimator """ Explanation: 2.5 Create Estimator End of explanation """ def train_and_evaluate_experiment(params, run_config): # TrainSpec #################################### train_input_fn = make_input_fn( TRAIN_DATA_FILE, batch_size=params.batch_size, num_epochs=None, mode=tf.estimator.ModeKeys.TRAIN ) train_spec = tf.estimator.TrainSpec( input_fn = train_input_fn, max_steps=params.traning_steps ) ############################################### # EvalSpec #################################### eval_input_fn = make_input_fn( TRAIN_DATA_FILE, num_epochs=1, batch_size=params.batch_size, ) eval_spec = tf.estimator.EvalSpec( name=datetime.utcnow().strftime("%H%M%S"), input_fn = eval_input_fn, steps=None, start_delay_secs=0, throttle_secs=params.eval_throttle_secs ) ############################################### tf.logging.set_verbosity(tf.logging.INFO) if tf.gfile.Exists(run_config.model_dir): print("Removing previous artefacts...") tf.gfile.DeleteRecursively(run_config.model_dir) print '' estimator = create_estimator(params, run_config) print '' time_start = datetime.utcnow() print("Experiment started at {}".format(time_start.strftime("%H:%M:%S"))) print(".......................................") tf.estimator.train_and_evaluate( estimator=estimator, train_spec=train_spec, eval_spec=eval_spec 
) time_end = datetime.utcnow() print(".......................................") print("Experiment finished at {}".format(time_end.strftime("%H:%M:%S"))) print("") time_elapsed = time_end - time_start print("Experiment elapsed time: {} seconds".format(time_elapsed.total_seconds())) return estimator """ Explanation: 2.6 Define Experiment End of explanation """ MODELS_LOCATION = 'models/movieles' MODEL_NAME = 'recommender_01' model_dir = os.path.join(MODELS_LOCATION, MODEL_NAME) params = tf.contrib.training.HParams( batch_size=265, traning_steps=1000, learning_rate=0.1, embedding_size=16, eval_throttle_secs=0, ) run_config = tf.estimator.RunConfig( tf_random_seed=SEED, save_checkpoints_steps=10000, keep_checkpoint_max=3, model_dir=model_dir, ) estimator = train_and_evaluate_experiment(params, run_config) """ Explanation: 2.7 Run Experiment with Parameters End of explanation """ def find_embedding_tensor(): with tf.Session() as sess: saver = tf.train.import_meta_graph(os.path.join(model_dir, 'model.ckpt-100000.meta')) saver.restore(sess, os.path.join(model_dir, 'model.ckpt-100000')) graph = tf.get_default_graph() trainable_tensors = map(str, graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)) for tensor in set(trainable_tensors): print tensor find_embedding_tensor() def extract_embeddings(): with tf.Session() as sess: saver = tf.train.import_meta_graph(os.path.join(model_dir, 'model.ckpt-100000.meta')) saver.restore(sess, os.path.join(model_dir, 'model.ckpt-100000')) graph = tf.get_default_graph() weights_tensor = graph.get_tensor_by_name('input_layer_1/movieId_embedding/embedding_weights:0') weights = np.array(sess.run(weights_tensor)) embeddings = {} for i in range(weights.shape[0]): embeddings[i] = weights[i] return embeddings embeddings = extract_embeddings() """ Explanation: 3. 
Extract Movie Embeddings End of explanation """ from annoy import AnnoyIndex def build_embeddings_index(num_trees): total_items = 0 annoy_index = AnnoyIndex(params.embedding_size, metric='angular') for item_id in embeddings.keys(): annoy_index.add_item(item_id, embeddings[item_id]) total_items += 1 print "{} items where added to the index".format(total_items) annoy_index.build(n_trees=num_trees) print "Index is built" return annoy_index index = build_embeddings_index(100) frequent_movie_ids = list(ratings_data.movieId.value_counts().index[:15]) movies_data[movies_data['movieId'].isin(frequent_movie_ids)] def get_similar_movies(movie_id, num_matches=5): similar_movie_ids = index.get_nns_by_item( movie_id, num_matches, search_k=-1, include_distances=False) similar_movies = movies_data[movies_data['movieId'].isin(similar_movie_ids)].title return similar_movies for movie_id in frequent_movie_ids: movie_title = movies_data[movies_data['movieId'] == movie_id].title.values[0] print "Movie: {}".format(movie_title) similar_movies = get_similar_movies(movie_id) print "Similar Movies:" print similar_movies print "--------------------------------------" """ Explanation: 4. Build Annoy Index End of explanation """ def make_serving_input_receiver_fn(): return tf.estimator.export.build_raw_serving_input_receiver_fn( {'userId': tf.placeholder(shape=[None], dtype=tf.int32)} ) export_dir = os.path.join(model_dir, 'export') if tf.gfile.Exists(export_dir): tf.gfile.DeleteRecursively(export_dir) estimator.export_savedmodel( export_dir_base=export_dir, serving_input_receiver_fn=make_serving_input_receiver_fn() ) import os export_dir = os.path.join(model_dir, "export") saved_model_dir = os.path.join( export_dir, [f for f in os.listdir(export_dir) if f.isdigit()][0]) print(saved_model_dir) predictor_fn = tf.contrib.predictor.from_saved_model( export_dir = saved_model_dir, ) output = predictor_fn({'userId': [1]}) print(output) """ Explanation: 5. 
Export the Model This needed to receive a userId and produce the embedding for the user. End of explanation """ def recommend_new_movies(userId, num_recommendations=5): watched_movie_ids = list(ratings_data[ratings_data['userId']==userId]['movieId']) user_emebding = predictor_fn({'userId': [userId]})['user_embedding'][0] similar_movie_ids = index.get_nns_by_vector( user_emebding, num_recommendations + len(watched_movie_ids), search_k=-1, include_distances=False) recommended_movie_ids = set(similar_movie_ids) - set(watched_movie_ids) similar_movies = movies_data[movies_data['movieId'].isin(recommended_movie_ids)].title return similar_movies frequent_user_ids = list((ratings_data.userId.value_counts().index[-350:]))[:5] print recommend_movies(418) """ Explanation: Serve Movie Recommendations to a User End of explanation """
jmhsi/justin_tinker
data_science/courses/deeplearning2/seq2seq-translation.ipynb
apache-2.0
import unicodedata, string, re, random, time, math, torch, torch.nn as nn from torch.autograd import Variable from torch import optim import torch.nn.functional as F import keras, numpy as np from keras.preprocessing import sequence """ Explanation: Requirements End of explanation """ SOS_token = 0 EOS_token = 1 class Lang: def __init__(self, name): self.name = name self.word2index = {} self.word2count = {} self.index2word = {0: "SOS", 1: "EOS"} self.n_words = 2 # Count SOS and EOS def addSentence(self, sentence): for word in sentence.split(' '): self.addWord(word) def addWord(self, word): if word not in self.word2index: self.word2index[word] = self.n_words self.word2count[word] = 1 self.index2word[self.n_words] = word self.n_words += 1 else: self.word2count[word] += 1 """ Explanation: Loading data files The data for this project is a set of many thousands of English to French translation pairs. This question on Open Data Stack Exchange pointed me to the open translation site http://tatoeba.org/ which has downloads available at http://tatoeba.org/eng/downloads - and better yet, someone did the extra work of splitting language pairs into individual text files here: http://www.manythings.org/anki/ The English to French pairs are too big to include in the repo, so download to data/fra.txt before continuing. The file is a tab separated list of translation pairs: I am cold. Je suis froid. We'll need a unique index per word to use as the inputs and targets of the networks later. To keep track of all this we will use a helper class called Lang which has word &rarr; index (word2index) and index &rarr; word (index2word) dictionaries, as well as a count of each word word2count to use to later replace rare words. 
End of explanation
"""
# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    # NFD decomposition splits accented characters into base char + combining
    # mark (category 'Mn'); dropping the marks leaves the plain base chars.
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
    )

# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
    s = unicodeToAscii(s.lower().strip())
    # Put a space before sentence-ending punctuation so it becomes its own token.
    s = re.sub(r"([.!?])", r" \1", s)
    # Collapse every run of non-letter characters into a single space.
    s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
    return s
"""
Explanation: The files are all in Unicode, to simplify we will turn Unicode characters to ASCII, make everything lowercase, and trim most punctuation.
End of explanation
"""
def readLangs(lang1, lang2, pairs_file, reverse=False):
    # Returns (input_lang, output_lang, pairs); when reverse=True the pair
    # order is flipped so translation goes lang2 -> lang1.
    print("Reading lines...")

    # Read the file and split into lines
    lines = open('data/%s' % (pairs_file)).read().strip().split('\n')

    # Split every line into pairs and normalize
    pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]

    # Reverse pairs, make Lang instances
    if reverse:
        pairs = [list(reversed(p)) for p in pairs]
        input_lang = Lang(lang2)
        output_lang = Lang(lang1)
    else:
        input_lang = Lang(lang1)
        output_lang = Lang(lang2)

    return input_lang, output_lang, pairs
"""
Explanation: To read the data file we will split the file into lines, and then split lines into pairs. The files are all English &rarr; Other Language, so if we want to translate from Other Language &rarr; English I added the reverse flag to reverse the pairs.
End of explanation
"""
MAX_LENGTH = 10  # maximum sentence length (in tokens) kept after filtering

eng_prefixes = (
    "i am ", "i m ",
    "he is", "he s ",
    "she is", "she s",
    "you are", "you re ",
    "we are", "we re ",
    "they are", "they re "
)

def filterPair(p):
    # Keep only short pairs whose English side starts with one of the prefixes.
    return len(p[0].split(' ')) < MAX_LENGTH and \
        len(p[1].split(' ')) < MAX_LENGTH and \
        p[1].startswith(eng_prefixes)

def filterPairs(pairs):
    return [pair for pair in pairs if filterPair(pair)]
"""
Explanation: Since there are a lot of example sentences and we want to train something quickly, we'll trim the data set to only relatively short and simple sentences.
Here the maximum length is 10 words (that includes ending punctuation) and we're filtering to sentences that translate to the form "I am" or "He is" etc. (accounting for apostrophes replaced earlier).
End of explanation
"""
def prepareData(lang1, lang2, pairs_file, reverse=False):
    # Full pipeline: read the pair file, filter the pairs, then build the
    # vocabularies of both languages from the surviving sentences.
    input_lang, output_lang, pairs = readLangs(lang1, lang2, pairs_file, reverse)
    print("Read %s sentence pairs" % len(pairs))
    pairs = filterPairs(pairs)
    print("Trimmed to %s sentence pairs" % len(pairs))
    print("Counting words...")
    for pair in pairs:
        input_lang.addSentence(pair[0])
        output_lang.addSentence(pair[1])
    print("Counted words:")
    print(input_lang.name, input_lang.n_words)
    print(output_lang.name, output_lang.n_words)
    return input_lang, output_lang, pairs

input_lang, output_lang, pairs = prepareData('eng', 'fra', 'fra.txt', True)
print(random.choice(pairs))

def indexesFromSentence(lang, sentence):
    # Word indices for a sentence, terminated with the EOS token.
    return [lang.word2index[word] for word in sentence.split(' ')]+[EOS_token]

def variableFromSentence(lang, sentence):
    # Wrap the index list as a 1 x len LongTensor Variable (batch of one).
    indexes = indexesFromSentence(lang, sentence)
    return Variable(torch.LongTensor(indexes).unsqueeze(0))

def variablesFromPair(pair):
    input_variable = variableFromSentence(input_lang, pair[0])
    target_variable = variableFromSentence(output_lang, pair[1])
    return (input_variable, target_variable)

def index_and_pad(lang, dat):
    # Convert every sentence to indices and zero-pad at the end ('post') so
    # all rows share the length of the longest sentence.
    return sequence.pad_sequences([indexesFromSentence(lang, s) for s in dat],
                                  padding='post').astype(np.int64)

fra, eng = list(zip(*pairs))
fra = index_and_pad(input_lang, fra)
eng = index_and_pad(output_lang, eng)

def get_batch(x, y, batch_size=16):
    # Sample a random mini-batch (without replacement) from x and y.
    idxs = np.random.permutation(len(x))[:batch_size]
    return x[idxs], y[idxs]
"""
Explanation: The full process for preparing the data is:
Read text file and split into lines, split lines into pairs
Normalize text, filter by length and content
Make word lists from sentences in pairs
End of explanation
"""
class EncoderRNN(nn.Module):
    # Embedding + GRU encoder; consumes a batch of padded index sequences.
    def __init__(self, input_size, hidden_size, n_layers=1):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(input_size, hidden_size)
        # batch_first=True: tensors are (batch, seq, feature).
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True,
                          num_layers=n_layers)

    def forward(self, input, hidden):
        output, hidden = self.gru(self.embedding(input), hidden)
        return output, hidden

    # TODO: other inits
    def initHidden(self, batch_size):
        # Zero initial hidden state, shape (1, batch, hidden).
        return Variable(torch.zeros(1, batch_size, self.hidden_size))
"""
Explanation: The Encoder
The encoder of a seq2seq network is a RNN that outputs some value for every word from the input sentence. For every input word the encoder outputs a vector and a hidden state, and uses the hidden state for the next input word.
End of explanation
"""
class DecoderRNN(nn.Module):
    # Plain (non-attentional) decoder: embedding + GRU + linear + log-softmax.
    def __init__(self, hidden_size, output_size, n_layers=1):
        super(DecoderRNN, self).__init__()
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True,
                          num_layers=n_layers)
        # TODO use transpose of embedding
        self.out = nn.Linear(hidden_size, output_size)
        self.sm = nn.LogSoftmax()

    def forward(self, input, hidden):
        # One decoding step for a batch of single tokens.
        emb = self.embedding(input).unsqueeze(1)
        # NB: Removed relu
        res, hidden = self.gru(emb, hidden)
        output = self.sm(self.out(res[:,0]))
        return output, hidden
"""
Explanation: Simple Decoder
In the simplest seq2seq decoder we use only last output of the encoder. This last output is sometimes called the context vector as it encodes context from the entire sequence. This context vector is used as the initial hidden state of the decoder.
At every step of decoding, the decoder is given an input token and hidden state. The initial input token is the start-of-string &lt;SOS&gt; token, and the first hidden state is the context vector (the encoder's last hidden state).
End of explanation
"""
class AttnDecoderRNN(nn.Module):
    # Attention decoder: at every step, attention weights over the encoder
    # outputs are computed from the current embedded input and hidden state.
    def __init__(self, hidden_size, output_size, n_layers=1, dropout_p=0.1, max_length=MAX_LENGTH):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout_p = dropout_p
        # max_length fixes the size of the attention layer, so attention can
        # only span input sentences up to this length.
        self.max_length = max_length

        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_output, encoder_outputs):
        # Single decoding step for a single token (batch size 1).
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)

        # Attention weights from [embedded input ; hidden state].
        attn_weights = F.softmax(self.attn(torch.cat((embedded[0], hidden[0]), 1)))
        # Weighted combination of the encoder outputs.
        attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.unsqueeze(0))

        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        for i in range(self.n_layers):
            output = F.relu(output)
            output, hidden = self.gru(output, hidden)

        output = F.log_softmax(self.out(output[0]))
        return output, hidden, attn_weights

    def initHidden(self):
        return Variable(torch.zeros(1, 1, self.hidden_size))
"""
Explanation: Attention Decoder
If only the context vector is passed between the encoder and decoder, that single vector carries the burden of encoding the entire sentence.
Attention allows the decoder network to "focus" on a different part of the encoder's outputs for every step of the decoder's own outputs. First we calculate a set of attention weights. These will be multiplied by the encoder output vectors to create a weighted combination. The result (called attn_applied in the code) should contain information about that specific part of the input sequence, and thus help the decoder choose the right output words.
Calculating the attention weights is done with another feed-forward layer attn, using the decoder's input and hidden state as inputs. Because there are sentences of all sizes in the training data, to actually create and train this layer we have to choose a maximum sentence length (input length, for encoder outputs) that it can apply to. Sentences of the maximum length will use all the attention weights, while shorter sentences will only use the first few.
End of explanation
"""
def train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    # One optimization step on a mini-batch; always teacher-forced here
    # (the ground-truth token targ is fed as the next decoder input).
    batch_size, input_length = input_variable.size()
    target_length = target_variable.size()[1]
    encoder_hidden = encoder.initHidden(batch_size).cuda()

    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    loss = 0

    encoder_output, encoder_hidden = encoder(input_variable, encoder_hidden)

    # Every sequence in the batch starts with the SOS token; the decoder is
    # primed with the encoder's final hidden state.
    decoder_input = Variable(torch.LongTensor([SOS_token]*batch_size)).cuda()
    decoder_hidden = encoder_hidden

    for di in range(target_length):
        decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden) #, encoder_output, encoder_outputs)
        targ = target_variable[:, di]
        # print(decoder_output.size(), targ.size(), target_variable.size())
        loss += criterion(decoder_output, targ)
        decoder_input = targ

    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()
    # Average loss per target time step.
    return loss.data[0] / target_length

def asMinutes(s):
    # Format a duration in seconds as "Xm Ys".
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)

def timeSince(since, percent):
    # Elapsed time plus an estimate of the remaining time, given the
    # fraction of work completed so far.
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))

def trainEpochs(encoder, decoder, n_epochs, print_every=1000, plot_every=100, learning_rate=0.01):
    # Main training loop: n_epochs mini-batch updates with RMSprop,
    # periodically printing progress and recording losses for plotting.
    start = time.time()
    plot_losses = []
    print_loss_total = 0 # Reset every print_every
    plot_loss_total = 0 # Reset every plot_every

    encoder_optimizer = optim.RMSprop(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.RMSprop(decoder.parameters(), lr=learning_rate)
    criterion = nn.NLLLoss().cuda()

    for epoch in range(1, n_epochs + 1):
        training_batch = get_batch(fra, eng)
        input_variable = Variable(torch.LongTensor(training_batch[0])).cuda()
        target_variable = Variable(torch.LongTensor(training_batch[1])).cuda()

        loss = train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion)
        print_loss_total += loss
        plot_loss_total += loss

        if epoch % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, epoch / n_epochs), epoch, epoch / n_epochs * 100, print_loss_avg))

        if epoch % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    showPlot(plot_losses)
"""
Explanation: Note: There are other forms of attention that work around the length limitation by using a relative position approach. Read about "local attention" in Effective Approaches to Attention-based Neural Machine Translation.
Training
To train we run the input sentence through the encoder, and keep track of every output and the latest hidden state. Then the decoder is given the &lt;SOS&gt; token as its first input, and the last hidden state of the decoder as its first hidden state.
"Teacher forcing" is the concept of using the real target outputs as each next input, instead of using the decoder's guess as the next input. Using teacher forcing causes it to converge faster but when the trained network is exploited, it may exhibit instability.
End of explanation
"""
# TODO: Make this change during training
teacher_forcing_ratio = 0.5

def attn_train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    # One optimization step for the attention decoder on a single sentence
    # pair; teacher forcing is applied at random with teacher_forcing_ratio.
    encoder_hidden = encoder.initHidden()
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    input_length = input_variable.size()[0]
    target_length = target_variable.size()[0]
    # Buffer of per-word encoder outputs that the attention attends to.
    encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
    loss = 0
    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0][0]
    decoder_input = Variable(torch.LongTensor([[SOS_token]]))
    decoder_hidden = encoder_hidden
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
    if use_teacher_forcing:
        # Teacher forcing: Feed the target as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_output, encoder_outputs)
            loss += criterion(decoder_output[0], target_variable[di])
            decoder_input = target_variable[di] # Teacher forcing
    else:
        # Without teacher forcing: use its own predictions as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_output, encoder_outputs)
            topv, topi = decoder_output.data.topk(1)
            ni = topi[0][0]
            decoder_input = Variable(torch.LongTensor([[ni]]))
            loss += criterion(decoder_output[0], target_variable[di])
            if ni == EOS_token:
                break
    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()
    return loss.data[0] / target_length
"""
Explanation: Attention
End of explanation
"""
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
%matplotlib inline

def showPlot(points):
    # Plot the recorded training losses.
    plt.figure()
    fig, ax = plt.subplots()
    loc = ticker.MultipleLocator(base=0.2) # this locator puts ticks at regular intervals
    ax.yaxis.set_major_locator(loc)
    plt.plot(points)
"""
Explanation: Plotting results
Plotting is done with matplotlib, using the array of loss values plot_losses saved while training.
End of explanation
"""
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
    # Greedy decoding of a single input sentence (no targets): the decoder's
    # own best guess is fed back as the next input, until EOS or max_length.
    input_variable = variableFromSentence(input_lang, sentence).cuda()
    input_length = input_variable.size()[0]
    encoder_hidden = encoder.initHidden(1).cuda()

    encoder_output, encoder_hidden = encoder(input_variable, encoder_hidden)

    decoder_input = Variable(torch.LongTensor([SOS_token])).cuda()
    decoder_hidden = encoder_hidden

    decoded_words = []
    # decoder_attentions = torch.zeros(max_length, max_length)

    for di in range(max_length):
        # decoder_output, decoder_hidden, decoder_attention = decoder(
        decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden) #, encoder_output, encoder_outputs)
        # decoder_attentions[di] = decoder_attention.data
        topv, topi = decoder_output.data.topk(1)
        ni = topi[0][0]
        if ni == EOS_token:
            decoded_words.append('<EOS>')
            break
        else:
            decoded_words.append(output_lang.index2word[ni])
        decoder_input = Variable(torch.LongTensor([ni])).cuda()

    # Second element is a placeholder for the (disabled) attention matrix.
    return decoded_words,0#, decoder_attentions[:di+1]

def evaluateRandomly(encoder, decoder, n=10):
    # Translate n random training pairs, printing source, target, prediction.
    for i in range(n):
        pair = random.choice(pairs)
        print('>', pair[0])
        print('=', pair[1])
        output_words, attentions = evaluate(encoder, decoder, pair[0])
        output_sentence = ' '.join(output_words)
        print('<', output_sentence)
        print('')
"""
Explanation: Evaluation
Evaluation is mostly the same as training, but there are no targets so we simply feed the decoder's predictions back to itself for each step. Every time it predicts a word we add it to the output string, and if it predicts the EOS token we stop there. We also store the decoder's attention outputs for display later.
End of explanation
"""
#TODO:
# - Test set
# - random teacher forcing
# - attention
# - multi layers
# - bidirectional encoding

hidden_size = 256
encoder1 = EncoderRNN(input_lang.n_words, hidden_size).cuda()
attn_decoder1 = DecoderRNN(hidden_size, output_lang.n_words).cuda()

trainEpochs(encoder1, attn_decoder1, 15000, print_every=500, learning_rate=0.005)

evaluateRandomly(encoder1, attn_decoder1)
"""
Explanation: Training and Evaluating
Note: If you run this notebook you can train, interrupt the kernel, evaluate, and continue training later. Comment out the lines where the encoder and decoder are initialized and run trainEpochs again.
End of explanation
"""
output_words, attentions = evaluate(encoder1, attn_decoder1, "je suis trop froid .")
plt.matshow(attentions.numpy())
"""
Explanation: Visualizing Attention
A useful property of the attention mechanism is its highly interpretable outputs. Because it is used to weight specific encoder outputs of the input sequence, we can imagine looking where the network is focused most at each time step.
You could simply run plt.matshow(attentions) to see attention output displayed as a matrix, with the columns being input steps and rows being output steps:
NOTE: This only works when using the attentional decoder, if you've been following the notebook to this point you are using the standard decoder.
End of explanation
"""
def showAttention(input_sentence, output_words, attentions):
    # Set up figure with colorbar
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions.numpy(), cmap='bone')
    fig.colorbar(cax)

    # Set up axes
    ax.set_xticklabels([''] + input_sentence.split(' ') + ['<EOS>'], rotation=90)
    ax.set_yticklabels([''] + output_words)

    # Show label at every tick
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

    plt.show()

def evaluateAndShowAttention(input_sentence):
    # Translate the sentence and display its attention matrix.
    output_words, attentions = evaluate(encoder1, attn_decoder1, input_sentence)
    print('input =', input_sentence)
    print('output =', ' '.join(output_words))
    showAttention(input_sentence, output_words, attentions)

evaluateAndShowAttention("elle a cinq ans de moins que moi .")
evaluateAndShowAttention("elle est trop petit .")
evaluateAndShowAttention("je ne crains pas de mourir .")
evaluateAndShowAttention("c est un jeune directeur plein de talent .")
"""
Explanation: For a better viewing experience we will do the extra work of adding axes and labels:
End of explanation
"""
mbeyeler/opencv-machine-learning
notebooks/04.01-Preprocessing-Data.ipynb
mit
from sklearn import preprocessing
import numpy as np

X = np.array([[ 1., -2., 2.],
              [ 3., 0., 0.],
              [ 0., 1., -1.]])
"""
Explanation: <!--BOOK_INFORMATION-->
<a href="https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv" target="_blank"><img align="left" src="data/cover.jpg" style="width: 76px; height: 100px; background: white; padding: 1px; border: 1px solid black; margin-right:10px;"></a>
This notebook contains an excerpt from the book Machine Learning for OpenCV by Michael Beyeler. The code is released under the MIT license, and is available on GitHub. Note that this excerpt contains only the raw code - the book is rich with additional explanations and illustrations. If you find this content useful, please consider supporting the work by buying the book!
<!--NAVIGATION-->
< Representing Data and Engineering Features | Contents | Reducing the Dimensionality of the Data >
Preprocessing Data
The more disciplined we are in handling our data, the better results we are likely to achieve in the end. The first step in this procedure is known as data preprocessing.
Standardizing features
Standardization refers to the process of scaling the data to have zero mean and unit variance. This is a common requirement for a wide range of machine learning algorithms, which might behave badly if individual features do not fulfill this requirement. We could manually standardize our data by subtracting from every data point the mean value ($\mu$) of all the data, and dividing that by the standard deviation ($\sigma$) of the data; that is, for every feature $x$, we would compute $(x - \mu) / \sigma$.
Alternatively, scikit-learn offers a straightforward implementation of this process in its preprocessing module.
Let's consider a 3 x 3 data matrix X, standing for three data points (rows) with three arbitrarily chosen feature values each (columns):
End of explanation
"""
# Standardize each column (feature) to zero mean and unit variance.
X_scaled = preprocessing.scale(X)
X_scaled
"""
Explanation: Then, standardizing the data matrix X can be achieved with the function scale:
End of explanation
"""
X_scaled.mean(axis=0)
"""
Explanation: Let's make sure X_scaled is indeed standardized: zero mean, unit variance
End of explanation
"""
X_scaled.std(axis=0)
"""
Explanation: In addition, every column of the standardized feature matrix should have variance of 1 (which is the same as checking for a standard deviation of 1 using std):
End of explanation
"""
# Scale each row (sample) so its L1 norm (sum of absolute values) is 1.
X_normalized_l1 = preprocessing.normalize(X, norm='l1')
X_normalized_l1
"""
Explanation: Normalizing features
Similar to standardization, normalization is the process of scaling individual samples to have unit norm. I'm sure you know that the norm stands for the length of a vector, and can be defined in different ways. We discussed two of them in the previous chapter: the L1 norm (or Manhattan distance) and the L2 norm (or Euclidean distance).
X can be normalized using the normalize function, and the L1 norm is specified by the norm keyword:
End of explanation
"""
# Scale each row (sample) to unit Euclidean (L2) norm.
X_normalized_l2 = preprocessing.normalize(X, norm='l2')
X_normalized_l2
"""
Explanation: Similarly, the L2 norm can be computed by specifying norm='l2':
End of explanation
"""
# Rescale each feature into the default [0, 1] range.
min_max_scaler = preprocessing.MinMaxScaler()
X_min_max = min_max_scaler.fit_transform(X)
X_min_max
"""
Explanation: Scaling features to a range
An alternative to scaling features to zero mean and unit variance is to get features to lie between a given minimum and maximum value. Often these values are zero and one, so that the maximum absolute value of each feature is scaled to unit size.
In scikit-learn, this can be achieved using MinMaxScaler:
End of explanation
"""
# Same idea, but scaling each feature into the range [-10, 10].
min_max_scaler = preprocessing.MinMaxScaler(feature_range=(-10, 10))
X_min_max2 = min_max_scaler.fit_transform(X)
X_min_max2
"""
Explanation: By default, the data will be scaled to fall within 0 and 1. We can specify different ranges by passing a keyword argument feature_range to the MinMaxScaler constructor:
End of explanation
"""
X
"""
Explanation: Binarizing features
Finally, we might find ourselves not caring too much about the exact feature values of the data. Instead, we might just want to know if a feature is present or absent. Binarizing the data can be achieved by thresholding the feature values. Let's quickly remind ourselves of our feature matrix, X:
End of explanation
"""
# Values above the threshold become 1, all others become 0.
binarizer = preprocessing.Binarizer(threshold=0.5)
X_binarized = binarizer.transform(X)
X_binarized
"""
Explanation: Let's assume that these numbers represent the thousands of dollars in our bank accounts. If there are more than 0.5 thousand dollars in the account, we consider the person rich, which we represent with a 1. Else we put a 0. This is akin to thresholding the data with threshold=0.5:
End of explanation
"""
from numpy import nan
X = np.array([[ nan, 0, 3 ],
              [ 2, 9, -8 ],
              [ 1, nan, 1 ],
              [ 5, 2, 4 ],
              [ 7, 6, -3 ]])
"""
Explanation: The result is a matrix made entirely of ones and zeros. Handling missing data
Another common need in feature engineering is the handling of missing data. For example, we might have a dataset that looks like this:
End of explanation
"""
# Replace every nan with the mean of its column (axis=0 by default).
from sklearn.preprocessing import Imputer
imp = Imputer(strategy='mean')
X2 = imp.fit_transform(X)
X2
"""
Explanation: Most machine learning algorithms cannot handle the Not a Number (NAN) values (nan in Python). Instead, we first have to replace all the nan values with some appropriate fill values. This is known as imputation of missing values.
Three different strategies to impute missing values are offered by scikit-learn:
- 'mean': Replaces all nan values with the mean value along a specified axis of the matrix (default: axis=0).
- 'median': Replaces all nan values with median value along a specified axis of the matrix (default: axis=0).
- 'most_frequent': Replaces all nan values with the most frequent value along a specified axis of the matrix (default: axis=0).
For example, the 'mean' imputer can be called as follows:
End of explanation
"""
# Mean of the known values in column 0: (2 + 1 + 5 + 7) / 4 = 3.75.
np.mean(X[1:, 0]), X2[0, 0]
"""
Explanation: Let's verify the math by calculating the mean by hand, should evaluate to 3.75 (same as X2[0, 0]):
End of explanation
"""
imp = Imputer(strategy='median')
X3 = imp.fit_transform(X)
X3
"""
Explanation: Similarly, the 'median' strategy relies on the same code:
End of explanation
"""
np.median(X[1:, 0]), X3[0, 0]
"""
Explanation: Let's make sure the median of the column evaluates to 3.5 (same as X3[0, 0]):
End of explanation
"""
nreimers/deeplearning4nlp-tutorial
2015-10_Lecture/Lecture2/code/3_Intro_Lasagne_Solution.ipynb
apache-2.0
import gzip
import cPickle
import numpy as np
import theano
import theano.tensor as T
import lasagne

# Load the pickle file for the MNIST dataset.
dataset = 'data/mnist.pkl.gz'

f = gzip.open(dataset, 'rb')
train_set, dev_set, test_set = cPickle.load(f)
f.close()

#train_set contains 2 entries, first the X values, second the Y values
train_x, train_y = train_set
dev_x, dev_y = dev_set
test_x, test_y = test_set
"""
Explanation: Introduction to Lasagne
There are various libraries building on top of Theano to provide easy building blocks for designing deep neural networks. Some of them are:
- Lasagne (https://github.com/Lasagne/Lasagne)
- Blocks (https://github.com/mila-udem/blocks)
- Keras (http://keras.io/)
- OpenDeep (http://www.opendeep.org/)
All libraries are kind of similar but different in the details, for example in the design philosophy. I chose (after too little research) Lasagne as it will allow you to interact with Theano and the computation graph. Keep an eye on this evolving area.
For a great example how to use Lasagne for MNIST see the Lasagne Tutorial: http://lasagne.readthedocs.org/en/latest/user/tutorial.html
Basics
Lasagne provides you with several basic components to build your neural networks. Instead of defining your HiddenLayer and SoftmaxLayer as in the previous example, you can use existing implementations from the library and easily plug them together.
In the following we will reimplement the MLP for the MNIST-dataset using Lasagne. For more information on Lasagne see http://lasagne.readthedocs.org/en/latest/
Load your dataset
As before we load our dataset. See 2_MNIST for more details.
End of explanation
"""
def build_mlp(n_in, n_hidden, n_out, input_var=None):
    # Build a one-hidden-layer MLP and return its output layer.
    #Input layer, 1 dimension = number of samples, 2 dimension = input, our 28*28 image
    l_in = lasagne.layers.InputLayer(shape=(None, n_in), input_var=input_var)

    # Our first hidden layer with n_hidden units
    # As nonlinearity we use tanh, you could also try rectify
    l_hid1 = lasagne.layers.DenseLayer(incoming=l_in, num_units=n_hidden,
                                       nonlinearity=lasagne.nonlinearities.tanh,
                                       W=lasagne.init.GlorotUniform())

    # Our output layer (a softmax layer)
    l_out = lasagne.layers.DenseLayer(incoming=l_hid1, num_units=n_out,
                                      nonlinearity=lasagne.nonlinearities.softmax)

    return l_out
"""
Explanation: Build the MLP
Now we use the provided layers from Lasagne to build our MLP
End of explanation
"""
# Parameters
n_in = 28*28
n_hidden = 50
n_out = 10

# Create the network
x = T.dmatrix('x')  # the data, one image per row
y = T.lvector('y')  # the labels are presented as 1D vector of [int] labels
network = build_mlp(n_in, n_hidden, n_out, x)

# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, y)
loss = loss.mean()

# Create update expressions for training, i.e., how to modify the
# parameters at each training step. Here, we'll use Stochastic Gradient
# Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.01, momentum=0.9)

# Predict the labels
network_predict_label = T.argmax(lasagne.layers.get_output(network, deterministic=True), axis=1)

# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function(inputs=[x, y], outputs=loss, updates=updates)

# Create the predict_labels function
predict_labels = theano.function(inputs=[x], outputs=network_predict_label)
"""
Explanation: Create the Train Function
After loading the data and defining the MLP, we can now create the train function.
End of explanation
"""
#Function that helps to iterate over our data in minibatches
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    # Note: a trailing partial batch (fewer than batchsize samples) is dropped.
    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets[excerpt]

#Method to compute the accuracy.
Call predict_labels to get the labels for the dataset def compute_accurarcy(dataset_x, dataset_y): predictions = predict_labels(dataset_x) errors = sum(predictions != dataset_y) #Number of errors accurarcy = 1 - errors/float(len(dataset_y)) return accurarcy number_of_epochs = 10 print "%d epochs" % number_of_epochs for epoch in xrange(number_of_epochs): for batch in iterate_minibatches(train_x, train_y, 20, shuffle=True): inputs, targets = batch train_fn(inputs, targets) accurarcy_dev = compute_accurarcy(dev_x, dev_y) accurarcy_test = compute_accurarcy(test_x, test_y) print "%d epoch: Accurarcy on dev: %f, accurarcy on test: %f" % (epoch, accurarcy_dev, accurarcy_test) print "DONE" """ Explanation: Train the model We run the training for some epochs and output the accurarcy of our network End of explanation """
SSDS-Croatia/SSDS-2017
Day-1/First day - Introduction to Machine Learning with Tensorflow [SOLVED].ipynb
mit
import tensorflow as tf """ Explanation: Summer School of Data Science - Split '17 1. Introduction to Machine Learning with TensorFlow This hands-on session serves as an introductory course for essential TensorFlow usage and basic machine learning with TensorFlow. This notebook is partly based on and follow the approach of chapter 6 of the book "Deep Learning" by Ian Goodfellow, Yoshua Bengio and Aaron Courville, available at: http://www.deeplearningbook.org/. Other useful tutorials exist in the form of Jupyter notebooks, some of which are: - https://github.com/udacity/deep-learning - https://github.com/DataScienceUB/DeepLearningfromScratch This notebook covers basic TensorFlow usage concepts, which are then applied to elementary machine learning models like linear and logistic regression, and finally a simple multilayer perceptron is built and trained using the established TensorFlow concepts. Basic TensorFlow concepts TensorFlow is an open source Python library which provides multiple APIs for buidling and evaluating computational graphs. These graphs can be used to represent any machine learning model, and TensorFlow provides methods for efficient optimization and evaluation of the models. The programmer's guide for TensorFlow can be found at https://www.tensorflow.org/programmers_guide/, and the full documentation is availale at https://www.tensorflow.org/api_docs/python/. The import statement for TensorFlow programs is: import tensorflow as tf. This provides access to all TensorFlow APIs, classes, methods and symbols. End of explanation """ # create a TensorFlow constant tensor t = tf.constant(5) print(t) # create a TensorFlow constant of a specific data type and shape t = tf.constant(7,shape=[2,3],dtype=tf.float32,name="const_tensor") print(t) """ Explanation: Tensor The basic concept behind TensorFlow is the tensor - an n-dimensional array of a base datatype. In TensorFlow it is represented by the tf.Tensor object which will produce a value when evaluated. 
A tf.Tensor object has a shape (which defines the structure of the elements) and a data type, shared by all the elements in the Tensor. The main types of tensors are: - Constant - Variable - Placeholder The tf.constant() method creates a constant tensor, populated with values of a data type, specified by arguments value, shape (optional), dtype (optional). End of explanation """ # create a TensorFlow session and evaluate the created constant sess=tf.Session() print(sess.run(t)) """ Explanation: However, any Tensor is only evaluated within a Session, which is the environment in which all tensors and operations are executed. End of explanation """ # create a tensor of any shape populated with zeros and check within the session t = tf.zeros([3,2,2]) print(sess.run(t)) # create a tensor of any shape populated with ones and check within the session t = tf.ones([2,4]) print(sess.run(t)) """ Explanation: Other very common and useful methods for creating tensors of constant value are tf.zeros() and tf.ones(). End of explanation """ # create a random tensor containing values from a uniform distribution between 10 and 20 t = tf.random_uniform([3,4,2],minval=10,maxval=20) print(t) print(sess.run(t)) """ Explanation: Tensors containing random values from various distribution can be created using a number of methods, with the most commonly used being tf.random_uniform() and tf.random_normal(). End of explanation """ # add a scalar to a tensor a = tf.ones([3,2]) sess.run(a+3) # subtract two tensors a = tf.constant(4.,shape=[2,3]) print(a) b = tf.random_normal(shape=[2,3]) print(b) sess.run(a-b) # divide two integer tensors a = tf.constant(4,shape=[2,3]) b = tf.constant(7,shape=[2,3]) print(a/b) """ Explanation: Simple algebraic operations such as +,-,/,and * can be used with tensors in this form, or by calling tf.add(), tf.subtract(), tf.divide(), or tf.multiply(). These are all element-wise, and defined for tensors of equal shapes and data-types. 
Tensors can be cast into a specific data type by calling tf.cast(). End of explanation """ # try out varied mathematical operations with various tensors a = tf.exp(tf.random_normal(shape=[3,2])) print(sess.run(a)) b = tf.matmul(a,tf.transpose(a)) print(sess.run(b)) """ Explanation: Other very useful operations include: - Absolute value (modulus) - tf.abs() - Exponentiation with $e$ - tf.exp() - Square and other powers - tf.square() and tf.pow() - Matrix multiplication - tf.matmul() - Transpose - tf.transpose() End of explanation """ # create a placeholder and feed it a value in a session a = tf.placeholder(dtype=tf.float32) b = tf.exp(a) print(b) print(sess.run(b,feed_dict={a:5})) # create two placeholders and a tensor implementing matrix multiplication x1 = tf.placeholder(dtype=tf.float32) x2 = tf.placeholder(dtype=tf.float32) y = tf.matmul(x1,x2) print(sess.run(y,{x2:[[1,2],[3,4]],x1:[[1,2],[3,4]]})) """ Explanation: Placeholders and Variables Placeholders and Vairables are special kinds of tensors which are the essential building blocks of more complex data and computation streams. These are the most commonly used types of tensors in TensorFlow. A Placeholder is a tensor which acts like a "promise" to provide a value at the evaluation of the computational graph. Placeholders are mostly used as input points in the computational graph where data will be provided. It will produce an error when evaluated, unless the value is fed to the session. End of explanation """ # create a variable, initialize it, and assign a new value within a session sess = tf.Session() a = tf.Variable(5) print(a) sess.run(tf.global_variables_initializer()) sess.run(a) sess.run(tf.assign(a,6)) print(sess.run(a)) sess.close() sess = tf.Session() sess.run(tf.global_variables_initializer()) print(sess.run(a)) """ Explanation: A Variable is a tensor which allows the addition of trainable parameters to the computational graph. 
Constants are intialized when created, as opposed to variables, which need to be initialized within the session (and the initialization procedure must be defined). Variables can be "manually" assigned a new value using tf.assign, and their state is kept within the session object. This is mostly used for model training, during which variables are changed within the optimization process. End of explanation """ #define placeholders for data x = tf.placeholder(dtype=tf.float32,shape=[None]) y = tf.placeholder(dtype=tf.float32,shape=[None]) #define model parameters as variables w = tf.Variable(tf.random_normal(shape=())) b = tf.Variable(tf.random_normal([])) #create a tensor which calculates the model output y_model = w*x + b """ Explanation: Linear regression in TensorFlow Linear regression is one of the simplest and most commonly used regression models. The multivariate linear regression can be written as: $$y = w^{T}x + b$$ where $y \in \mathbb{R}$ is the output, $w \in \mathbb{R}^{p}$ is a column vector containing $p$ weights for $p$ features in $x \in \mathbb{R}^{p}$, and $b \in \mathbb{R}$ is the bias. The parameters contained in $w$ and $b$ are also called coefficients and are trained by using a gradient descent algorithm. 
Exercise: Let us build a univariate linear regression model for a simple problem, using the previously introduced TensorFlow concepts: - The model input $x$ is a placeholder for data - The trainable model parameters $w$ and $b$ are defined as TensorFlow Variables - The model output $\hat{y}$ is a Tensor - The obesrved output $y$ is also a placeholder, where data will be provided for training purpose End of explanation """ #define the loss function as the mean of all squared errors (MSE) loss = tf.reduce_mean(tf.square(y_model-y)) #create a gradient descent optimizer optimizer = tf.train.GradientDescentOptimizer(0.1) #create a train operation train = optimizer.minimize(loss) #generate data to train the regression import numpy as np x_train = np.random.normal(size=10) y_train = 5*x_train + 10 + np.random.normal(size=10)/10 print(x_train,y_train) #initialize variables, run 100 epochs of training algorithm sess.run(tf.global_variables_initializer()) for epoch in range(100): sess.run(train,{y:y_train,x:x_train}) print('w:',sess.run(w),', b:',sess.run(b),', loss:',sess.run(loss,{y:y_train,x:x_train})) """ Explanation: To train a model built in TensorFlow, a loss function needs to be defined, most commonly as a reduction operation. An optimizer object needs to be defined, and the minimize() method called in order to update the variables defined within the model to minimize the selected loss function. When creating optimizer objects, choices about the learning rate have to be made - these, in combination with the number of training epochs, can greatly influence the model training process. With the approapriate learning rate, the optimization can quickly converge. 
End of explanation """ #generate XOR training data import numpy as np x_train = np.array([[0,0],[0,1],[1,0],[1,1]]) y_train = np.array([[0],[1],[1],[0]]) #import matplotlib for visualization %matplotlib inline import matplotlib.pyplot as plt #logical indices of data where the outputs are 1 and 0 t = np.where(y_train==1)[0] f = np.where(y_train==0)[0] #scatter plot of the data plt.scatter(x_train[t,0],x_train[t,1],c='b',marker='x',s=70) plt.scatter(x_train[f,0],x_train[f,1],c='r',marker='o',s=70) """ Explanation: Logistic Regression Logistic regression is a very common and simple linear model for classification purposes, based on linear regression and the logistic function: $$y = \frac{1}{1+e^{-(w^{T}x + b)}}$$ Due to the nature of the logistic function, it produces output values in the range $[0,1]$, thus providing a probability for each class given in the output. Similar to linear regression, the variables defined within the logistic regression model are parameters trainable by various optimization algorithms. Let us build a logistic regression for the well-known XOR problem. 
End of explanation """ #define placeholders for the data x = tf.placeholder(dtype=tf.float32,shape=[None,2]) y = tf.placeholder(dtype=tf.float32,shape=[None,1]) #define variables for the trainable parameters of the model w = tf.Variable(tf.random_normal([2,1]),name="weights") b = tf.Variable(tf.random_normal([1]), name="bias") #create a tensor to calculate the model output y_model = 1/(1+tf.exp(-(tf.matmul(x,w) + b))) #define the loss function, create the optimizer and the training operation loss = tf.reduce_mean(tf.square(y_model-y)) optimizer = tf.train.GradientDescentOptimizer(0.3) train = optimizer.minimize(loss) #train the model sess.run(tf.global_variables_initializer()) for epoch in range(1000): sess.run(train,{x:x_train,y:y_train}) print('w:',sess.run(w,{y:y_train,x:x_train}),', b:',sess.run(b,{y:y_train,x:x_train}),', loss:',sess.run(loss,{y:y_train,x:x_train})) """ Explanation: Exercise: The model input $x$ is a placeholder for a data The trainable model parameters $w$ and $b$ are defined as TensorFlow Variables The model output $\hat{y}$ is a Tensor The obesrved output $y$ is also a placeholder, where output data will be provided in order to train the model End of explanation """ print(sess.run(y_model,{x:x_train})) """ Explanation: Inspect the trained model parameters and the model outputs. What is the minimum found by the optimizer? 
End of explanation """ sess = tf.Session() x_train = np.array([[0,0],[0,1],[1,0],[1,1]]) y_train = np.array([[0],[1],[1],[0]]) X = tf.placeholder(tf.float32,[None,2]) y = tf.placeholder(tf.float32,[None,1]) W1 = tf.Variable(tf.random_uniform([2,2]),name="weights1") b1 = tf.Variable(tf.random_uniform([2]), name="bias1") W2 = tf.Variable(tf.random_uniform([2,1]),name="weights2") b2 = tf.Variable(tf.random_uniform([1]), name="bias2") f1 = tf.matmul(X,W1)+b1 f2 = tf.nn.sigmoid(f1) y_model = tf.matmul(f2,W2)+b2 loss = tf.reduce_mean(tf.square(y_model-y)) optimizer = tf.train.GradientDescentOptimizer(0.35) #optimizer = tf.train.AdamOptimizer(0.1) train = optimizer.minimize(loss) sess.run(tf.global_variables_initializer()) for epoch in range(1000): sess.run(train, feed_dict={X: x_train, y: y_train}) print("loss:", sess.run(loss,{X: x_train, y: y_train})) """ Explanation: Multilayer Perceptron A multilayer perceptron is a feedforward network that can be thought of a model composed of multiple nested functions, for instance: $$y = f^{(3)}(f^{(2)}(f^{(1)}(x)))$$ This means that the output of each function is routed as the input of the next function, and this operational and data flow is strictly one-directional (thus "feedforward") and may contain multiple layers of nested functions (thus "deep"). TensorFlow is a very suitable tool for building and training such models. Here we will consider the XOR problem once again, and build a multilayer perceptron to classify the data correctly. It was demonstrated previously that the XOR data are not linearly separable - this means that a non-linear layer (function) within the model is needed to tranform the problem to a linearly separable space. This is in fact the core of the multilayer perceptron as well as other deep learning models - nonlinear activation functions such as the logistic function, $tanh$, or ReLU. 
A comprehensive guide for TensorFlow supported functions can be found in: https://www.tensorflow.org/versions/r0.12/api_docs/python/nn/activation_functions_. Let us build a multilayer perceptron model where the sigmoid activation function is used for the hiddern layer. Let: - $f^{(1)}(x) = W^{(1)}x + b^{(1)}$ - $f^{(2)}(x) = {1}/({1+e^{-x}})$ - $f^{(3)}(x) = W^{(2)}x + b^{(2)}$ with $W^{(1)} \in \mathbb{R}^{2\times 2}$, $b^{(1)} \in \mathbb{R}^{2\times 1}$, $W^{(2)} \in \mathbb{R}^{2\times 1}$, and $b^{(2)} \in \mathbb{R}$. End of explanation """ print(sess.run(W1),'\n') print(sess.run(b1),'\n') f1_out = sess.run(f1,{X: x_train, y: y_train}) print(f1_out,'\n') plt.scatter(f1_out[t,0],f1_out[t,1],c='b',marker='x',s=70) plt.scatter(f1_out[f,0],f1_out[f,1],c='r',marker='o',s=70) """ Explanation: The first layer $f^{(1)}(x) = W^{(1)}x + b^{(1)}$ is a linear transformation of the input, and thus cannot transform the XOR problem to a linearly separable space. Let us inspect the trained parameters $W^{(1)}$ and $b^{(1)}$, and the output of the first layer. End of explanation """ f2_out = sess.run(f2,{X: x_train, y: y_train}) print(f2_out) plt.scatter(f2_out[t,0],f2_out[t,1],c='b',marker='x',s=70) plt.scatter(f2_out[f,0],f2_out[f,1],c='r',marker='o',s=70) """ Explanation: The next layer $f^{(2)}(x)$ is the sigmoid function, which is a nonlinear transformation of the input, thus providing the possibility of transforming the problem to a new space where the outputs could be linearly separable. End of explanation """ print("y: ",sess.run(y,{X: x_train, y: y_train}),"\n") print("model: ",sess.run(y_model,{X: x_train, y: y_train})) """ Explanation: The final layer is the model output: End of explanation """
tedunderwood/changepoint
diagonal_permutation.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt from matplotlib.collections import LineCollection import csv, random import numpy as np from scipy import spatial pcafields = ['PC' + str(x) for x in range(1,15)] # Here we just create a list of strings that will # correspond to field names in the data provided # by Mauch et al. pca = list() with open('nb/quarterlypca.csv', encoding = 'utf-8') as f: reader = csv.DictReader(f) for row in reader: newpcarow = [] for field in pcafields: newpcarow.append(float(row[field])) pca.append(newpcarow) pca = np.array(pca) print(pca.shape) """ Explanation: Diagonal permutation Our goal in this notebook is to create a distance matrix that reflects the distances between particular quarters of the Billboard Hot 100, 1960-2010, and measure Foote novelty on that matrix to create a null model that will allow us to assess the significance of those Foote novelty measurements. The underlying dataset is borrowed from Mauch et al., "The Evolution of Popular Music," but the methods we develop here can, we hope, be adapted to other domains. This notebook was written up by Ted Underwood, in response to an insight about the appropriate null model suggested by Yuancheng Zhu. We begin by reading in the data, which consists of principal components of a topic model that identifies "harmonic and timbral topics" in the music. The appropriateness of that dimension-reduction is not our central concern here; we're interested in what happens after you've got points on a timeline characterized in some kind of dimension space. 
End of explanation """ def distance_matrix(pca): observations, dimensions = pca.shape distmat = np.zeros((observations, observations)) for i in range(observations): for j in range(observations): dist = spatial.distance.cosine(pca[i], pca[j]) distmat[i, j] = dist return distmat d = distance_matrix(pca) plt.rcParams["figure.figsize"] = [9.0, 6.0] plt.matshow(d, origin = 'lower', cmap = plt.cm.YlOrRd, extent = [1960, 2010, 1960, 2010]) plt.show() """ Explanation: Now we have an array of 200 observations, each of which is characterized by 14 variables. Let's define a function to create a distance matrix by comparing each observation against all the others. End of explanation """ def make_foote(quart): tophalf = [-1] * quart + [1] * quart bottomhalf = [1] * quart + [-1] * quart foote = list() for i in range(quart): foote.append(tophalf) for i in range(quart): foote.append(bottomhalf) foote = np.array(foote) return foote foote5 = make_foote(20) # This gives us a Foote matrix with a five-year half-width. # 5 becomes 20 because the underlying dataset has four # "quarters" of data in each year. def foote_novelty(distmat, foote): axis1, axis2 = distmat.shape assert axis1 == axis2 distsize = axis1 axis1, axis2 = foote.shape assert axis1 == axis2 halfwidth = axis1 / 2 novelties = [] for i in range(distsize): start = int(i - halfwidth) end = int(i + halfwidth) if start < 0 or end > (distsize - 1): novelties.append(0) else: novelties.append(np.sum(foote * distmat[start: end, start: end])) return novelties def getyears(): years = [] for i in range(200): years.append(1960 + i*0.25) return years years = getyears() novelties = foote_novelty(d, foote5) plt.plot(years, novelties) plt.show() print("Max novelty for a five-year half-width: " + str(np.max(novelties))) """ Explanation: So far so good; that closely resembles the distance matrix seen in Mauch et al. Now let's calculate Foote novelties. There are two parts to this process. 
Calculating Foote novelties is done by sliding a smaller matrix along the diagonal of the distance matrix and then multiplying elementwise. So first we have to create the smaller matrix, using the function make_foote. Then we pass that as a parameter to the function foote_novelty. Passing matrices of different size will calculate different windows of similarity. Below we define these two functions, and then calculate Foote novelties for a window with a five-year half-width. End of explanation """ randomized = np.array(pca) np.random.shuffle(randomized) randdist = distance_matrix(randomized) plt.matshow(randdist, origin = 'lower', cmap = plt.cm.YlOrRd, extent = [1960, 2010, 1960, 2010]) plt.show() """ Explanation: Testing significance Okay, now we have functions that can test Foote novelty in a distance matrix. But how do we know whether the apparent peaks and troughs in the plot above represent statistically significant variation? We need a "null model": a way of producing distance matrices that represent a random version of our data. On the other hand, we want to get the right kind of randomness. Foote novelty is sensitive to the distribution of values relative to the central diagonal timeline. So if we produce a "random model" where those values are evenly distributed, for instance by randomizing the underlying data and then calculating a distance matrix on it ... End of explanation """ def diagonal_permute(d): newmat = np.zeros((200, 200)) # We create one randomly-permuted list of integers called "translate" # that is going to be used for the whole matrix. translate = [i for i in range(200)] random.shuffle(translate) # Because distances matrices are symmetrical, we're going to be doing # two diagonals at once each time. We only need one set of values # (because symmetrical) but we need two sets of indices in the original # matrix so we know where to put the values back when we're done permuting # them. 
for i in range(0, 200): indices1 = [] indices2 = [] values = [] for x in range(200): y1 = x + i y2 = x - i if y1 >= 0 and y1 < 200: values.append(d[x, y1]) indices1.append((x, y1)) if y2 >= 0 and y2 < 200: indices2.append((x, y2)) # Okay, for each diagonal, we permute the values. # We'll store the permuted values in newvalues. # We also check to see how many values we have, # so we can randomly select values if needed. newvalues = [] lenvals = len(values) vallist = [i for i in range(lenvals)] for indexes, value in zip(indices1, values): x, y = indexes xposition = translate[x] yposition = translate[y] # We're going to key the randomization to the x, y # values for each point, insofar as that's possible. # Doing this will ensure that specific horizontal and # vertical lines preserve the dependence relations in # the original matrix. # But the way we're doing this is to use the permuted # x (or y) values to select an index in our list of # values in the present diagonal, and that's only possible # if the list is long enough to permit it. So we check: if xposition < 0 and yposition < 0: position = random.choice(vallist) elif xposition >= lenvals and yposition >= lenvals: position = random.choice(vallist) elif xposition < 0: position = yposition elif yposition < 0: position = xposition elif xposition >= lenvals: position = yposition elif yposition >= lenvals: position = xposition else: position = random.choice([xposition, yposition]) # If either x or y could be used as an index, we # select randomly. # Whatever index was chosen, we use it to select a value # from our diagonal. newvalues.append(values[position]) values = newvalues # Now we lay down (both versions of) the diagonal in the # new matrix. 
for idxtuple1, idxtuple2, value in zip(indices1, indices2, values): x, y = idxtuple1 newmat[x, y] = value x, y = idxtuple2 newmat[x, y] = value return newmat newmat = diagonal_permute(d) plt.matshow(newmat, origin = 'lower', cmap = plt.cm.YlOrRd, extent = [1960, 2010, 1960, 2010]) plt.show() """ Explanation: That is far from an apples-to-apples null model. The problem is that the original data was sequential, so distances between nearby points were usually smaller than distances between remote ones. That created the central "yellow path" running from lower left to upper right, following the diagonal timeline of quarters compared-to-themselves. We need a better null model. The one below relies on a suggestion from Yuancheng Zhu, which was to permute values of the original distance matrix, and do it only within diagonals. That way comparisons across a distance of (say) two quarters are permuted only with other two-quarter comparisons. I've added a small twist, which is to try to preserve the same underlying permutation for every diagonal (as far as possible), keying it to the x or y value for each point. That way vertically and horizontally-adjacent "pixels" of the matrix retain the same kind of "cross-hatched" correlation with each other that we saw in the original matrix. It's not perfect, but it's a reasonable approximation of a dataset where change is sequential, but randomly distributed. End of explanation """ novelties = foote_novelty(newmat, foote5) years = getyears() plt.plot(years, novelties) plt.show() print("Max novelty for five-year half-width:" + str(np.max(novelties))) def zeroless(sequence): newseq = [] for element in sequence: if element > 0.01: newseq.append(element) return newseq print("Min novelty for five-year half-width:" + str(np.min(zeroless(novelties)))) """ Explanation: What if we now try assessing foote novelties on this randomized matrix? What maximum or minimum value will we get? 
End of explanation """ def permute_test(distmatrix, yrwidth): footematrix = make_foote(4 * yrwidth) actual_novelties = foote_novelty(distmatrix, footematrix) permuted_peaks = [] permuted_troughs = [] for i in range(100): randdist = diagonal_permute(distmatrix) nov = foote_novelty(randdist, footematrix) nov = zeroless(nov) permuted_peaks.append(np.max(nov)) permuted_troughs.append(np.min(nov)) permuted_peaks.sort(reverse = True) permuted_troughs.sort(reverse = True) threshold05 = permuted_peaks[4] threshold01 = permuted_peaks[0] threshold95 = permuted_troughs[94] threshold99 = permuted_troughs[99] print(threshold01) print(threshold99) significance = np.ones(len(actual_novelties)) for idx, novelty in enumerate(actual_novelties): if novelty > threshold05 or novelty < threshold95: significance[idx] = 0.049 if novelty > threshold01 or novelty < threshold99: significance[idx] = 0.009 return actual_novelties, significance, threshold01, threshold05, threshold95, threshold99 def colored_segments(novelties, significance): x = [] y = [] t = [] idx = 0 for nov, sig in zip(novelties, significance): if nov > 1: x.append(idx/4 + 1960) y.append(nov) t.append(sig) idx += 1 x = np.array(x) y = np.array(y) t = np.array(t) points = np.array([x,y]).transpose().reshape(-1,1,2) segs = np.concatenate([points[:-1],points[1:]],axis=1) lc = LineCollection(segs, cmap=plt.get_cmap('jet')) lc.set_array(t) return lc, x, y novelties, significance, threshold01, threshold05, threshold95, threshold99 = permute_test(d, 5) years = [] for i in range(200): years.append(1960 + i*0.25) plt.plot(years, novelties) startpoint = years[0] endpoint = years[199] plt.hlines(threshold05, startpoint, endpoint, 'r', linewidth = 3) plt.hlines(threshold95, startpoint, endpoint, 'r', linewidth = 3) plt.show() lc, x, y = colored_segments(novelties, significance) plt.gca().add_collection(lc) # add the collection to the plot plt.xlim(1960, 2010) # line collections don't auto-scale the plot plt.ylim(y.min(), y.max()) 
plt.show() """ Explanation: By repeatedly running that test, we can assess the likely range of random variation. It turns out that there are only two "peaks" in the dataset that are clearly and consistently p < 0.05: one in the early eighties, and one in the earl nineties. The slowing of change at the end of the nineties is also statistically significant. End of explanation """ def zeroless_seq(thefilter, filtereda, filteredb): thefilter = np.array(thefilter) filtereda = np.array(filtereda) filteredb = np.array(filteredb) filtereda = filtereda[thefilter > 0] filteredb = filteredb[thefilter > 0] thefilter = thefilter[thefilter > 0] return thefilter, filtereda, filteredb plt.clf() plt.axis([1960, 2010, 45, 325]) novelties, significance, threshold01, threshold05, threshold95, threshold99 = permute_test(d, 5) novelties, years, significance = zeroless_seq(novelties, getyears(), significance) yplot = novelties[significance < 0.05] xplot = years[significance < 0.05] plt.scatter(xplot, yplot, c = 'red') plt.plot(years, novelties) years = getyears() startpoint = years[0] endpoint = years[199] plt.hlines(threshold05, startpoint, endpoint, 'r', linewidth = 3) plt.hlines(threshold95, startpoint, endpoint, 'r', linewidth = 3) plt.show() """ Explanation: Visualization Neither of the methods used above are terribly good as visualizations, so let's come up with a slightly better version: getting rid of the misleading "edges" and overplotting points to indicate the number of significant observations in particular periods. 
End of explanation """ def pacechange(startdate, enddate, pca): years = getyears() startidx = years.index(startdate) endidx = years.index(enddate) midpoint = int((startidx + endidx)/2) firsthalf = np.zeros(14) for i in range(startidx,midpoint): firsthalf = firsthalf + pca[i] secondhalf = np.zeros(14) for i in range(midpoint, endidx): secondhalf = secondhalf + pca[i] return spatial.distance.cosine(firsthalf, secondhalf) print(pacechange(1990, 1994, pca)) print(pacechange(2001, 2005, pca)) """ Explanation: Effect size What about the effect size? Foote novelty is not really a direct measurement of the pace of change. One way to measure it is, to accept the periods defined by the visualization above, and compare change across each of those periods. So, for instance, the significant points in the second peak range from 1990 to 1994, and the lowest trough is roughly 2001 to 2005. We can divide each of those periods in half, and compare the first half to the second half. It looks like Mauch et al. are roughly right about effect size: it's a sixfold difference. End of explanation """ thesum = 0 theobservations = 0 for i in range(1960, 2006): theobservations += 1 thesum += pacechange(i, i+4, pca) print(thesum / theobservations) """ Explanation: We can also get a mean value for the whole run. End of explanation """ plt.axis([1960, 2010, 0, y.max() + 10]) def add_scatter(d, width): novelties, significance, threshold01, threshold05, threshold95, threshold99 = permute_test(d, width) novelties, years, significance = zeroless_seq(novelties, getyears(), significance) yplot = novelties[significance < 0.05] xplot = years[significance < 0.05] plt.scatter(xplot, yplot, c = 'red') plt.plot(years, novelties) add_scatter(d, 3) add_scatter(d, 4) add_scatter(d, 5) plt.ylabel('Foote novelty') plt.show() """ Explanation: Comparing multiple scales at once If we wanted to, we could also overplot multiple scales of comparison with different half-widths. 
Doing this reveals one of the nice things about the "Foote novelty" method, which is that it remains relatively stable as you vary scales of comparison. The same cannot be said, for instance, of changepoint analysis! In the cell below we've overplotted three-year, four-year, and five-year Foote novelties, highlighting in each case the specific quarters that have two-tailed p values lower than 0.05. End of explanation """
ioam/scipy-2017-holoviews-tutorial
solutions/04-working-with-tabular-data-with-solutions.ipynb
bsd-3-clause
import numpy as np import scipy.stats as ss import pandas as pd import holoviews as hv hv.extension('bokeh') %opts Curve Scatter [tools=['hover']] """ Explanation: <a href='http://www.holoviews.org'><img src="assets/hv+bk.png" alt="HV+BK logos" width="40%;" align="left"/></a> <div style="float:right;"><h2>04. Working with Tabular Datasets</h2></div> As we have already discovered, elements are simple wrappers around your data that provide a semantically meaningful representation. Tabular data (also called columnar data) is one of the most common, general, and versatile data formats, corresponding to how data is laid out in a spreadsheet. There are many different ways to put data into a tabular format, but for interactive analysis having tidy data provides flexibility and simplicity. In this tutorial all the information you have learned in the previous sections will finally really pay off. We will discover how to facet data and use different element types to explore and visualize the data contained in a real dataset. End of explanation """ macro_df = pd.read_csv('../data/macro.csv') macro_df.head() """ Explanation: What is tabular, tidy data? End of explanation """ macro = hv.Dataset(macro_df, kdims=['country', 'year']) macro """ Explanation: For tidy data, the columns of the table represent variables or dimensions and the rows represent observations. Declaring dimensions Mathematical variables can usually be described as dependent or independent. In HoloViews these correspond to value dimensions and key dimensions (respectively). 
In this dataset 'country' and 'year' are independent variables or key dimensions, while the remainder are automatically inferred as value dimensions: End of explanation """ macro = macro.redim.label(growth='GDP Growth', unem='Unemployment', year='Year', country='Country') """ Explanation: We will also give the dimensions more sensible labels using redim.label: End of explanation """ curves = macro.to(hv.Curve, kdims='year', vdims='unem', groupby='country') curves """ Explanation: Mapping dimensions to elements Once we have a Dataset with multiple dimensions we can map these dimensions onto elements onto the .to method. The method takes four main arguments: The element you want to convert to The key dimensions (or independent variables to display) The dependent variables to display The dimensions to group by As a first simple example let's go through such a declaration: We will use a Curve Our independent variable will be the 'year' Our dependent variable will be 'unem' We will groupby the 'country'. 
End of explanation """ %%opts Bars [width=600 xrotation=45] bars = macro.sort('country').to(hv.Bars, kdims='country', vdims='unem', groupby='year') bars %%opts HeatMap [width=600 xrotation=90 tools=['hover']] # Exercise: Create a HeatMap using ``macro.to``, declaring kdims 'year' and 'country', and vdims 'growth' # You'll need to declare ``width`` and ``xrotation`` plot options for HeatMap to make the plot readable # You can also add ``tools=['hover']`` to get more info macro.to(hv.HeatMap, kdims=['year', 'country'], vdims=['growth']) """ Explanation: Alternatively we could also group by the year and view the unemployment rate by country as Bars instead: End of explanation """ %%opts BoxWhisker [width=800 xrotation=30] (box_fill_color=Palette('Category20')) macro.to(hv.BoxWhisker, 'country', 'growth', groupby=[]) %%opts BoxWhisker [width=800 xrotation=30] (box_fill_color=Palette('Category20')) # Exercise: Display the distribution of GDP growth by year using the BoxWhisker element macro.to(hv.BoxWhisker, 'year', 'growth', groupby=[]) """ Explanation: Displaying distributions Often we want to summarize the distribution of values, e.g. to reveal the distribution of unemployment rates for each OECD country across time. This means we want to ignore the 'year' dimension in our dataset, letting it be summarized instead. To stop HoloViews from grouping by the extra variable, we pass an empty list to the groupby argument. 
End of explanation """ %%opts Scatter [width=800 height=400 size_index='growth'] (color=Palette('Category20') size=5) %%opts NdOverlay [legend_position='left'] macro.to(hv.Scatter, 'year', ['unem', 'growth']).overlay().relabel('OECD Unemployment 1960 - 1990') # Exercise: Instead of faceting using an .overlay() of Scatter elements, facet the data using a .grid() # of Curve or Area elements macro.to(hv.Curve, 'year', ['unem', 'growth']).grid().relabel('OECD Unemployment 1960 - 1990') %%opts GridSpace [shared_yaxis=True] # Exercise: You'll notice that you get quite a lot of countries in the grid. # You can try supplying a short list of countries to the 'macro.select` method to get a more-practical subset. # Hint: You may want to pass the shared_yaxis=True plot option to GridSpace, to get a y-axis macro.select(country=['Italy', 'France', 'Sweden', 'Netherlands']).to(hv.Area, 'year', 'unem').grid() """ Explanation: Faceting dimensions In the previous section we discovered how to facet our data using the .overlay, .grid and .layout methods. Instead of working with more abstract FM modulation signals, we now have concrete variables to group by, namely the 'country' and 'year': End of explanation """ %%opts Curve [width=600] agg = macro.aggregate('year', function=np.mean, spreadfn=np.std) (hv.Curve(agg) * hv.ErrorBars(agg, kdims=['year'], vdims=['growth', 'growth_std'])) %%opts Bars [width=800 xrotation=90] # Exercise: Display aggregate GDP growth by country, building it up in a series of steps # Step 1. First, aggregate the data by country rather than by year, using # np.mean and ss.sem as the function and spreadfn, respectively, then # make a `Bars` element from the resulting ``agg`` agg = macro.aggregate('country', function=np.mean, spreadfn=ss.sem) hv.Bars(agg).sort() %%opts Bars [width=800 xrotation=90] # Step 2: You should now have a bars plot, but with no error bars. 
To add the error bars, # print the 'agg' as text to see which vdims are available (which will be different for # different spreadfns), then overlay ErrorBars as above but for the new kdims and # the appropriate vdims # Hint: You'll want to make the plot wider and use an xrotation to see the labels clearly hv.Bars(agg).sort() * hv.ErrorBars(agg, kdims=['country'], vdims=['growth', 'growth_sem']) """ Explanation: Aggregating Another common operation is computing aggregates. We can also compute and visualize these easily using the aggregate method. Simply supply the dimension(s) to aggregate by and supply a function and optionally a secondary function to compute the spread. Once we have computed the aggregate we can simply pass it to the Curve and ErrorBars: End of explanation """
ericmjl/Network-Analysis-Made-Simple
archive/5-graph-input-output-student.ipynb
mit
import zipfile # This block of code checks to make sure that a particular directory is present. if "divvy_2013" not in os.listdir('datasets/'): print('Unzipping the divvy_2013.zip file in the datasets folder.') with zipfile.ZipFile("datasets/divvy_2013.zip","r") as zip_ref: zip_ref.extractall('datasets') stations = pd.read_csv('datasets/divvy_2013/Divvy_Stations_2013.csv', parse_dates=['online date'], encoding='utf-8') stations.head(10) trips = pd.read_csv('datasets/divvy_2013/Divvy_Trips_2013.csv', parse_dates=['starttime', 'stoptime']) trips.head(10) """ Explanation: Tables to Networks, Networks to Tables Networks can be represented in a tabular form in two ways: As an adjacency list with edge attributes stored as columnar values, and as a node list with node attributes stored as columnar values. Storing the network data as a single massive adjacency table, with node attributes repeated on each row, can get unwieldy, especially if the graph is large, or grows to be so. One way to get around this is to store two files: one with node data and node attributes, and one with edge data and edge attributes. The Divvy bike sharing dataset is one such example of a network data set that has been stored as such. Loading Node Lists and Adjacency Lists Let's use the Divvy bike sharing data set as a starting point. The Divvy data set is comprised of the following data: Stations and metadata (like a node list with attributes saved) Trips and metadata (like an edge list with attributes saved) The README.txt file in the Divvy directory should help orient you around the data. End of explanation """ G = nx.DiGraph() """ Explanation: At this point, we have our stations and trips data loaded into memory. How we construct the graph depends on the kind of questions we want to answer, which makes the definition of the "unit of consideration" (or the entities for which we are trying to model their relationships) is extremely important. 
Let's try to answer the question: "What are the most popular trip paths?" In this case, the bike station is a reasonable "unit of consideration", so we will use the bike stations as the nodes. To start, let's initialize a directed graph G. End of explanation """ for d in stations.to_dict('records'): # each row is a dictionary node_id = d.pop('id') G.add_node(node_id, attr_dict=d) """ Explanation: Then, let's iterate over the stations DataFrame, and add in the node attributes. End of explanation """ # # Run the following code at your own risk :) # for r, d in trips.iterrows(): # start = d['from_station_id'] # end = d['to_station_id'] # if (start, end) not in G.edges(): # G.add_edge(start, end, count=1) # else: # G.edge[start][end]['count'] += 1 counts = trips.groupby(['from_station_id', 'to_station_id'])['trip_id'].count().reset_index() for d in counts.to_dict('records'): G.add_edge(d['from_station_id'], d['to_station_id'], count=d['trip_id']) """ Explanation: In order to answer the question of "which stations are important", we need to specify things a bit more. Perhaps a measure such as betweenness centrality or degree centrality may be appropriate here. The naive way would be to iterate over all the rows. Go ahead and try it at your own risk - it may take a long time :-). Alternatively, I would suggest doing a pandas groupby. End of explanation """ from collections import Counter # Count the number of edges that have x trips recorded on them. trip_count_distr = ______________________________ # Then plot the distribution of these plt.scatter(_______________, _______________, alpha=0.1) plt.yscale('log') plt.xlabel('num. of trips') plt.ylabel('num. of edges') """ Explanation: Exercise Flex your memory muscles: can you make a scatter plot of the distribution of the number of edges that have a certain number of trips? (3 min.) The x-value is the number of trips taken between two stations, and the y-value is the number of edges that have that number of trips. 
End of explanation """ # Filter the edges to just those with more than 100 trips. G_filtered = G.copy() for u, v, d in G.edges(data=True): # Fill in your code here. len(G_filtered.edges()) """ Explanation: Exercise Create a new graph, and filter out the edges such that only those with more than 100 trips taken (i.e. count &gt;= 100) are left. (3 min.) End of explanation """ # Fill in your code here. """ Explanation: Let's now try drawing the graph. Exercise Use nx.draw_kamada_kawai(my_graph) to draw the filtered graph to screen. This uses a force-directed layout. (1 min.) End of explanation """ locs = {n: np.array([d['latitude'], d['longitude']]) for n, d in G_filtered.nodes(data=True)} # for n, d in G_filtered.nodes(data=True): # print(n, d.keys()) nx.draw_networkx_nodes(G_filtered, pos=locs, node_size=3) nx.draw_networkx_edges(G_filtered, pos=locs) plt.show() """ Explanation: Finally, let's visualize this as a GIS person might see it, taking advantage of the latitude and longitude data. End of explanation """ for n in G_filtered.nodes(): ____________ c = CircosPlot(__________) __________ plt.savefig('images/divvy.png', dpi=300) """ Explanation: Exercise Try visualizing the graph using a CircosPlot. Order the nodes by their connectivity in the original graph, but plot only the filtered graph edges. (3 min.) You may have to first annotate the connectivity of each node, as given by the number of neighbors that any node is connected to. End of explanation """ nx.write_gpickle(G, 'datasets/divvy_2013/divvy_graph.pkl') G = nx.read_gpickle('datasets/divvy_2013/divvy_graph.pkl') """ Explanation: In this visual, nodes are sorted from highest connectivity to lowest connectivity in the unfiltered graph. Edges represent only trips that were taken >100 times between those two nodes. Some things should be quite evident here. 
There are lots of trips between the highly connected nodes and other nodes, but there are local "high traffic" connections between stations of low connectivity as well (nodes in the top-right quadrant). Saving NetworkX Graph Files NetworkX's API offers many formats for storing graphs to disk. If you intend to work exclusively with NetworkX, then pickling the file to disk is probably the easiest way. To write to disk: nx.write_gpickle(G, handle) To load from disk: G = nx.read_gpickle(handle) End of explanation """
pranavj1001/LearnLanguages
python/DataAnalysis/numpy/NumPy.ipynb
mit
import numpy as np """ Explanation: NumPy NumPy is a library for the Python programming language, adding support for large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays. In this notebook we'll try various numpy methods and in the process learn more about NumPy. Installation Please follow this link Importing NumPy Once numpy is installed, we can import it in our file End of explanation """ list = ['a', 'b', 'c', 'd'] list np.array(list) list_matrix = [[1, 2, 3, 4, 5, 6, 7, 8, 9]] list_matrix np.array(list_matrix) """ Explanation: NumPy Arrays In NumPy, strictly 1-D arrays are known as vectors and 2-D arrays are known as matrices (a matrix can also have only one row or one column). With NumPy arrays we can get access to various pre-written functions from NumPy. Creating NumPy Arrays There are various ways to create NumPy Arrays. Some of them are listed below. From a Python List End of explanation """ np.arange(1, 11) np.arange(5, 60, 5) """ Explanation: arange method arange ( starting_number, ending_number_plus_one, step? ) Returns evenly spaced values within a given interval. step is optional End of explanation """ np.zeros(10) np.zeros((5, 5)) """ Explanation: zeros method zeros ( shape ) Returns a new array of given shape and type, filled with zeros. End of explanation """ np.ones(10) np.ones((5,5)) """ Explanation: ones method ones ( shape ) Returns a new array of given shape and type, filled with ones. End of explanation """ np.linspace(10, 20, 5) np.linspace(100, 101, 10) """ Explanation: linspace linspace ( starting_number, ending_number, number_of_elements_in_array ) Returns evenly spaced numbers over a specified interval. End of explanation """ np.eye(4) """ Explanation: eye eye ( number_of_rows ) Returns a 2-D array with ones on the diagonal and zeros elsewhere. 
(Returns an identity matrix) End of explanation """ np.random.rand(5) np.random.rand(3,3) np.random.rand(2,3,4) """ Explanation: Random rand rand( shape ) Returns random values in a given shape. End of explanation """ np.random.randn(3) np.random.randn(4,4) """ Explanation: randn randn ( shape ) Returns a sample (or samples) from the "standard normal" distribution. End of explanation """ np.random.randint(5) np.random.randint(1,11) np.random.randint(1, 100, 10) np.random.randint(1, 100, (4, 4)) """ Explanation: randint randint( low, high?, size? ) Returns random integers from low (inclusive) to high (exclusive). End of explanation """ list = np.arange(1,10) list.max() list.min() list.argmax() list.argmin() """ Explanation: Array Attributes max, min, argmax, argmin End of explanation """ reshape_list = np.arange(1,10) reshape_list reshape_list.reshape(3,3) """ Explanation: Reshape, Shape and Ravel reshape reshape ( shape ) Returns an array containing the same data with a new shape. End of explanation """ shape_list = np.arange(1,10) shape_list shape_list.shape shape_list.reshape(3,3) shape_list.reshape(3,3).shape """ Explanation: Shape It's an attribute which returns the shape (in a tuple) of the array. End of explanation """ ravel_list = np.arange(1,26).reshape(5,5) ravel_list ravel_list = ravel_list.ravel() ravel_list """ Explanation: ravel ravel ( ) Returns a flattened array. (does not return a new copy) End of explanation """ flatten_list = np.arange(1,26).reshape(5,5) flatten_list flatten_list = flatten_list.flatten() flatten_list """ Explanation: flatten flatten ( ) Returns a copy of the array collapsed into one dimension. 
End of explanation """ list = np.arange(1,11) list.dtype list = np.array(['a', 'b', 'c']) list.dtype """ Explanation: dtype It's an attribute which returns the data type of the object End of explanation """ selection_list = np.arange(1,26) selection_list selection_list[9] selection_list[24] """ Explanation: Selection index-based selection For normal 1-D lists End of explanation """ selection_list = selection_list.reshape(5, 5) selection_list selection_list[2:,1:] selection_list[1:4,2:4] """ Explanation: For multi dimensional lists End of explanation """ selection_list = selection_list.ravel() selection_list selection_list[selection_list>10] """ Explanation: comparison selectors End of explanation """ uni_list = np.arange(1,11) uni_list np.square(uni_list) np.sin(uni_list) np.log(uni_list) np.log10(uni_list) np.isfinite(uni_list) """ Explanation: Universal Functions NumPy comes with many universal functions. Some of them are in the next cells. End of explanation """
xlbaojun/Note-jupyter
05其他/pandas文档-zh-master/数据合并、连接和拼接-Merge, join, and concat.ipynb
gpl-2.0
import pandas as pd import numpy as np df1 = pd.DataFrame({'A':['A0','A1','A2','A3'], 'B':['B0','B1','B2','B3'], 'C':['C0','C1','C2','C3'], 'D':['D0','D1','D2','D3']}, index=[0,1,2,3]) df1 df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'], 'B': ['B4', 'B5', 'B6', 'B7'], 'C': ['C4', 'C5', 'C6', 'C7'], 'D': ['D4', 'D5', 'D6', 'D7']}, index=[4, 5, 6, 7]) df2 df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'], 'B': ['B8', 'B9', 'B10', 'B11'], 'C': ['C8', 'C9', 'C10', 'C11'], 'D': ['D8', 'D9', 'D10', 'D11']}, index=[8, 9, 10, 11]) df3 frames = [df1, df2, df3] #产生一个list对象 results = pd.concat(frames) results """ Explanation: 数据合并、连接和拼接 Merge, join, and concatenate 对于Series DataFrame和Panel对象,pandas提供了丰富的对象之间的连接、合并操作 专业术语-中英文对照 | 英语 | 汉语 | | ------------- |:-------------:| | merge | 合并| | join | 连接 | | concat | 拼接 | |index|索引| |list|列表| |dict|字典| pandas axis解释 官方:It specifies the axis along which the means are computed. 沿着轴操作。 默认axis=0,也就是竖轴,操作的结果是行数的增加或减少 axis=1,也就是横轴,操作的结果每一列属性增加或减少 拼接操作 Concatenating objects pandas.concat()方法实现了所有的拼接操作,即沿着一条轴将多个对象堆叠到一起。 提醒:所有使用concat()进行行拼接(axis=1)的操作,都可以转化为使用merge()完成。 concat()拼接时根据两个对象的index进行拼接,可以使用reset_index(),得到两个DataFrame,使用pd.merge(),on=index得到的列。 在讲解复杂的concat操作之前,先看一个简单的示例: pandas中concat对DataFrame操作, 由axis和join两个参数共同控制得到结果。 axis=0, 结果是行数等于两个子DataFrame的行数之和(结果的索引是两个子DF 索引的罗列,即使子DF的索引重复也无所谓)。而每一行的列数 由join参数控制,如果join=‘outer’,会对两个子DataFrame的列求并集,得到结果的每一行属性;如果join=‘inner’,会对两个子DataFrame的列求交集,得到结果的每一行属性 axis=1, 结果中每一行的属性个数等于两个子DF中的属性相加。而行数由join参数控制,具体地,对子DF的索引进行outer或inner。 End of explanation """ res = pd.concat(frames, join='inner') res """ Explanation: 使用join='inner' 查看结果 ,和outer一样! 
End of explanation """ df5 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'], 'B': ['B8', 'B9', 'B10', 'B11'], 'C': ['C8', 'C9', 'C10', 'C11'], 'D': ['D8', 'D9', 'D10', 'D11']}, index=[2, 3, 10, 11]) df5 df1 results2 = pd.concat([df1,df2,df5]) results2 """ Explanation: 修改df3的index,看看结果 End of explanation """ df5 = pd.DataFrame({'A': ['A2', 'A9', 'A10', 'A11'], 'B': ['B2', 'B9', 'B10', 'B11'], 'C': ['C2', 'C9', 'C10', 'C11'], 'D': ['D2', 'D9', 'D10', 'D11']}, index=[2, 3, 10, 11]) df5 results3 = pd.concat([df1,df2,df5]) results3 """ Explanation: 修改df5,使得某几行和df2一样,再看看结果 End of explanation """ #层次化索引, 用处是给列表中每个对象一个map标记,这样在结果中还能方便的调用每个子Series或DataFrame result = pd.concat(frames, keys=['x','y','z']) result """ Explanation: 如同numpy.concatenate()方法, pandas.concat方法接收一个列表或者字典对象然后进行拼接操作,注意列表或字典中每个对象要是同构的。拼接的本质就是沿着一条轴并且根据对另一条轴上进行的操作 将列表中的对象堆叠。 concat方法的参数: pd.concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False) objs: Series、DataFrame或Panel对象构成的列表或字典。如果传入一个字典对象,它的键值就作为最后结果的键值参数,除非显示说明结果的键值参数。 axis: 轴,取值范围是{0,1,...},默认值为0。concat操作时沿着的那条轴 join: 取值范围是{'inner', 'outer'},即内连接(交集)和外连接(并集)。 默认是'outer'(外连接)。连接操作决定了如何处理除axis之外的那条轴。 join_axes: 拼接操作得到的结果,它的索引取值。 keys: 序列对象,默认是None。 构建层次化索引时使用传入的keys作为最外层的索引。如果想要传入多级keys,需要以元组形式传入。 levels: 序列构成的列表,默认是None。用于构建MultiIndex。 names:列表,默认是None。层次索引中每个level的名字。 verify_integrity: 布尔变量,默认是False。用于检查新拼接的轴是否包含重复值。这个操作很耗时。 ignore_index: 布尔变量,默认是False。如果值为True,则不使用拼接轴现有的索引值,拼接结果的索引值将会设置为0,1,...,n-1。 copy: 布尔变量,默认是True。 如果取值为False,则不会复制不必要的数据。 如果不结合例子来看,上面介绍的参数真没法解释。 先来看一下keys参数 用于构建层次化索引时起到的作用。 End of explanation """ result.ix['y'] #查看df2 """ Explanation: 注意到结果中的索引是层次化的。 End of explanation """ frames = [process_your_file(f) for f in files] result = pd.concat(frames) """ Explanation: 注意:concat()方法在拼接使要复制所有的数据,因此对于它的性能你要容忍。为了方便起见,如果对多个数据集拼接,可以使用列表解析式。 End of explanation """ df4 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'], 'D': ['D2', 'D3', 'D6', 'D7'], 'F': ['F2', 'F3', 
'F6', 'F7']}, index=[2, 3, 6, 7]) result = pd.concat([df1, df4], axis=1) #这里axis=1 """ Explanation: 对其它轴进行逻辑运算 Set logic on the other axes 如果是对DataFrame或Panel对象进行拼接操作,你可以同时对非拼接轴进行逻辑运算。由于Series只有一个轴,所以此功能不适用于Series对象。 有三种方式能够对非拼接轴进行逻辑运算: * join='outer',取并集,这也是默认的操作,这个操作绝会有信息损失。 * join='inner',取交集。 * 利用join_axes参数。 针对上面三种方法,各举一例说明: End of explanation """ df4 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'], 'D': ['D2', 'D3', 'D6', 'D7'], 'F': ['F2', 'F3', 'F6', 'F7']}, index=[3, 2, 7, 6]) #注意这里的索引值序列 df4 result = pd.concat([df1, df4], axis=1) result #结果中的索引值已排序 """ Explanation: 解释: axis=1,即拼接轴是横轴,先将df1的列和df4的列名进行堆叠,即每一行有7列。由于join='outer', 故将df1和df4的索引取取并集,得到{0,1,2,3,6,7},共6行。 还要注意结果的索引值是排好序的。 End of explanation """ result = pd.concat([df1, df4], axis=1, join='inner') result """ Explanation: 再来看看join='inner'的例子: End of explanation """ result = pd.concat([df1, df4], axis=1, join_axes=[df1.index]) """ Explanation: 解释: axis=1,即拼接轴是横轴,先将df1的列和df4的列名进行堆叠,即每一行有7列。由于join='inner', 故将df1和df4的索引取取并集,得到{2,3},共2行。 还要注意结果的索引值没有排序。 最后,看一下使用 join_axes参数的例子: End of explanation """ result = df1.append(df2) """ Explanation: 解释: axis=1,即拼接轴是横轴,先将df1的列和df4的列名进行堆叠,即每一行有7列。由于join_axes=[df1.index],故结果的索引取值和df1相同,{0,1,2,3}。 使用append()方法进行 行拼接 如果只想对Series或DataFrame对象进行行拼接(axis=0),推荐使用append()方法。 append()方法实际上就是沿着索引轴(axis=0)进行拼接的concat()。 注意: 如果是对DataFrame对象进行append操作,要注意他们的索引值交集必须为空!即每个DataFrame对象的索引值都不相同。列名不作要求。 其实,对DataFrame进行append操作时是可以忽略索引的,result = df1.append(df4, ignore_index=True) End of explanation """ result = df1.append(df4) #由于df1和df2的索引值交集不为空,导致最后result的索引值有重复! 
""" Explanation: End of explanation """ result = df1.append([df2,df3]) """ Explanation: append()方法可以一次拼接多个对象。 End of explanation """ df1 df4 result = pd.concat([df1, df4],ignore_index=True) result df1 df4 result2 = pd.concat([df1, df4],axis=1, ignore_index=True) #99.9999%不推荐大家列拼接时使用ignore_index result2 """ Explanation: append()操作并不是直接对df1对象进行操作,而是在df1副本的基础上进行拼接操作。 拼接操作时忽略索引值 大多数情况下,索引值都是默认生成的一些无意义的id,此时,两个DataFrame对象很可能有重复的索引值,但他们并没有实质物理含义,此时,在进行行拼接操作时我们可以忽略索引,使用ignore_index参数即可。 在进行列拼接时,就不要使用ignore_index参数了,因为99.9999%的数据列名都是有意义的。 End of explanation """ result = df1.append(df4, ignore_index=True) """ Explanation: DataFrame.append方法也有ignore_index参数哦 End of explanation """ s1 = pd.Series(['X0','X1','X2','X3','X4'],name='X') s1 result = pd.concat([df1,s1],axis=1) result """ Explanation: 同时对Series和DataFrame对象进行拼接操作 很简单,原因就是Series会被隐式转为DataFrame对象,同时保持列名相同 End of explanation """ s2 = pd.Series(['_0', '_1', '_2', '_3']) result = pd.concat([df1, s2, s2, s2], axis=1) """ Explanation: 如果Series的列没有名字,会默认生成数字用于列名。 End of explanation """ s3 = pd.Series([0, 1, 2, 3], name='foo') s4 = pd.Series([0, 1, 2, 3]) s5 = pd.Series([0, 1, 4, 5]) pd.concat([s3,s4,s5],axis=1) """ Explanation: 对Series对象进行拼接时使用keys参数 concat()方法中keys参数除了构建层次化索引外,另一个很常见的使用情景是:对多个Series对象进行拼接操作,我们希望重新制定得到的DataFrame对象中的列名,而不是使用Series中的名字。 End of explanation """ pd.concat([s3,s4,s5],axis=1,keys=['red','blue','yellos']) """ Explanation: 通过keys参数,来设置生成的DataFrame的列名 End of explanation """ result = pd.concat(frames, keys=['x', 'y', 'z']) """ Explanation: 回顾一下我们的第一个例子: End of explanation """ pieces = {'x':df1, 'y':df2, 'z':df3} result = pd.concat(pieces) result """ Explanation: 可以用字典代替上面concat方法中的frames和keys: End of explanation """ result = pd.concat(pieces, keys=['z', 'y']) #只对'z','y'对应的DataFrame进行拼接 result """ Explanation: End of explanation """ s2 = pd.Series(['X0', 'X1', 'X2', 'X3'], index=['A', 'B', 'C', 'D']) s2 df1 result = df1.append(s2, ignore_index=True) #推荐忽略现有的索引值 result dicts = [{'A': 1, 'B': 2, 'C': 3, 
'X': 4}, {'A': 5, 'B': 6, 'C': 7, 'Y': 8}] result = df1.append(dicts, ignore_index=True) """ Explanation: 对DataFrame对象增加行数 你可以使用append()方法接收一个Series或者列表 对DataFrame对象增加一行,虽然这种方法效率不高,因为要重新生成一个新的DataFrame存放结果。 也可以传递一个Series构成的列表或者字典构成的列表,来增加几行! End of explanation """ left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], 'A': ['A0', 'A1', 'A2', 'A3'], 'B': ['B0', 'B1', 'B2', 'B3'] }) right = pd.DataFrame({'key': ['K0', 'K2', 'K1', 'K3'], 'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3'] }) pd.merge(left, right, on='key') """ Explanation: 数据库风格的DataFrame 连接/合并操作 pandas提供了非常高效的DataFrame连接(join)操作,并且使用风格和SQL非常类似。相对于其他的开源实现,pandas无疑是相当高效的, 这要归功于对DataFrame对象的优秀的算法设计和高效的内部数据部署。 如果你之前有过SQL开发经验而首次使用pandas,你可能想先了解二者的对比。 pandas仅提供了一个merge()方法,就能够对DataFrame对象进行类似关系数据库的连接(join)操作,先看merge()参数 merge(left, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=True, suffixes=('_x', '_y'), copy=True, indicator=False) left: 参与操作的DataFrame对象 right: 参与操作的另一个DataFrame对象 on: 进行连接操作用到的列名,可以是一列,也可以是多列。这个列名必须在left和right中都存在! 如果on参数没有赋值,并且left_index和right_index都是False,pandas会将left和right都含有的列作为on的值 left_on: 取left中某一列名作为keys,可以是列名或者长度和DataFrame对象一行长度相等的数组 right_on: 取right中某一列名作为keys, 可以是列名或者长度和DataFrame对象一行长度相等的数组 left_index: 如果取值是True,使用left中的索引作为连接操作的keys right_index: 意思同上, how: 取值范围{'left', 'right', 'outer', 'inner'},默认是inner sort: 是否对结果按照连接keys进行排序,默认是True,如果顺序不重要,可以设置为False,会提高效率 suffixes: 元组。如果left和right中除keys之外,也有列名一样,会用suffixes参数区别, 默认格式('_x', '_y'). 
copy:是否copy数据,默认为True,这样不会改变left和right indicator:在pandas 0.17.0中加入的参数,如果设置为True,能够显示结果中的key来自left还是right, merger方法的返回的对象类型同left。 merge()方法是pandas中的全局方法,你可以通过DataFrame对象调用它。 和merge()方法很相似的是DataFrame.join()方法,它内部使用merge方法来实现 索引与索引、索引与列 的拼接操作,默认是对索引进行拼接,而merge方法默认是对列进行拼接,无疑,对列进行拼接更常见。除非你要对索引进行拼接,通常都是使用merge()方法。 基本的数学知识:关系代数 对数据库操作熟悉的同学肯定对拼接操作不陌生,下面几种情况是必须知道的: 一对一拼接(one-to-one join): 比如对两个DF对象的索引 进行拼接操作,注意此时索引值必须是唯一的 多对一拼接 (many-to-one join): 对一个对象的索引(唯一的)和另一个对象的一列或几列进行拼接操作 多对多拼接 (many-to-many join): 对两个对象的列 进行拼接操作 注意: 显然, 多对多拼接时最常用的,并且在多对多拼接过程中会丢弃索引值。 大家有必要花时间理解多对多拼接操作的情况。在SQL/关系代数中,如果连接时的键值(key)在两个表中值不唯一,拼接结果是两个表的笛卡尔积。 看一个简单的例子来理解笛卡尔积。 End of explanation """ left = pd.DataFrame({ 'key1':['K0', 'K0', 'K1', 'K2'], 'key2':['K0', 'K1', 'K0', 'K1'], 'A':['A0', 'A1', 'A2', 'A3'], 'B':['B0', 'B1', 'B2', 'B3'] }) right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'], 'key2': ['K0', 'K0', 'K0', 'K0'], 'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3']}) pd.merge(left, right, on=['key1', 'key2']) """ Explanation: 解释: 拼接的键值是'key', left中key这一列的取值是{K0, k1, K2, K3},right中key这一列的取值是{K0, K1, K2, K3}, 由于how参数默认值是'inner', 即内连接,所以笛卡尔积是{K0, K1, K2, K3} 再看一个复杂点的例子, 多个列名作为key End of explanation """ result = pd.merge(left, right, how='left', on=['key1', 'key2']) """ Explanation: 解释: 此时拼接操作的键值是['key1', 'key2'],在left中['key1','key2']的取值是{['K0','K0'], ['K0', 'K1'], ['K1', 'K0'], ['K2', 'K1']},在right中['key1', 'key2']的取值是{['K0', 'K0'], ['K1', 'K0'], ['K2', 'K0']}, 默认进行内连接,所以取二者的交集{['K0', 'K0'], ['K1', 'K0']}, 并且left中['K1', 'K0']出现了两次,所以拼接结果是3行。 merge方法中的参数how决定了哪些键值(keys)会被保留到结果中。注意如果on参数接收的键值在left和right中都没有出现,拼接结果是NA。 下面总结了how参数在SQL中对应的操作。 | 参数how取值 | SQL拼接操作 | 操作描述| | ------------- |:-------------:| :-------------:| | left | LEFT OUTER JOIN| 只使用来自left中的键值| | right | RIGHT OUTER JOIN | 只使用来自right中的键值| | outer | FULL OUTER JOIN | 使用left、right中键值的并集| |inner|INNER JOIN|使用left、right中键值的交集| End of explanation """ result = pd.merge(left, right, how='right', on=['key1', 'key2']) """ 
Explanation: 解释: on的参数是['key1', 'key2'],所以拼接的键值是['key1', 'key2'],how参数是'left',所以进行的是左拼接,只关注left中键值取值。 过程:left中第一行键值是['K0', 'K1'],发现right有此取值,拼接后得到result中的第一行; left中第二行键值是['K0', 'K1'],发现right中没有此取值,仍进行拼接得到result中第二行,注意'C' 'D'取值NaN left中第三行键值是['K1', 'K0'], 发现right中有此取值切出现了两次,分别进行拼接得到result中第三行、第四行 left中第四行取值是['K2', 'K1'], 发现right中没有此取值,仍进行拼接得到result中第五行,'C', 'D'取值NaN End of explanation """ result = pd.merge(left, right, how='outer', on=['key1', 'key2']) """ Explanation: End of explanation """ result = pd.merge(left, right, how='inner', on=['key1', 'key2']) """ Explanation: End of explanation """ df1 = pd.DataFrame({'col1':[0,1], 'col_left':['a','b']}) df2 = pd.DataFrame({'col1':[1,2,2],'col_right':[2,2,2]}) df1 df2 pd.merge(df1, df2, on='col1', how='outer', indicator=True) """ Explanation: merge方法中indicator参数说明 0.17.0版本才有的参数 merge方法在0.17.0版本中新增了一个参数indicator。这个参数取值True,False或字符串。如果取值True,会在拼接结果中多出一列'_merge', 显示 每一行的键值来自left还是right。 | _merge值 | 含义| | ------------- |:-------------:| | left_only | 键值仅来自left| | right_only | 键值仅来自right| | both | 键值来自left和both| End of explanation """ pd.merge(df1, df2, on='col1', how='outer', indicator='indicator_column') """ Explanation: 上面提到indicator参数取值也可以是字符串,没啥特别的,就是在拼接结果中用接收的字符串作为'_merge'的列名罢了。 End of explanation """ left = pd.DataFrame({'A': ['A0', 'A1', 'A2'], 'B': ['B0', 'B1', 'B2']}, index=['K0', 'K1', 'K2']) right = pd.DataFrame({'C': ['C0', 'C2', 'C3'], 'D': ['D0', 'D2', 'D3']}, index=['K0', 'K2', 'K3']) result = left.join(right) """ Explanation: 对索引进行拼接 DataFrame.join方法的作用是对两个DataFrame对象进行基于索引值的拼接操作。下面是一个简单的例子: 把索引也看做是一列,再来看下面的操作就就没啥了 End of explanation """ result = left.join(right, how='outer') """ Explanation: End of explanation """ result = left.join(right, how='inner') """ Explanation: End of explanation """ result = pd.merge(left, right, left_index=True, right_index=True, how='outer') """ Explanation: merge方法中提供了left_index, right_index参数同样实现了基于索引的拼接操作。 End of explanation """ result = pd.merge(left, right, left_index=True, 
right_index=True, how='inner') """ Explanation: End of explanation """ left = pd.DataFrame({'k': ['K0', 'K1', 'K2'], 'v': [1, 2, 3]}) right = pd.DataFrame({'k': ['K0', 'K0', 'K3'], 'v': [4, 5, 6]}) result = pd.merge(left, right, on='k') """ Explanation: 除拼接键值之外,重复的列名用suffixes参数区别 很多情况下,left和right中除了拼接键值之外,还会有其他列名也相同,为了在结果中加以区分,这个时候suffixes参数就派上用场了。好消息是suffixes有默认值,即使不为它赋值在结果中也能够区分。 举例如下: End of explanation """ result = pd.merge(left, right, on='k', suffixes=['_l', '_r']) #为suffixes参数赋值 """ Explanation: 默认情况下: End of explanation """ left = left.set_index('k') right = right.set_index('k') result = left.join(right, lsuffix='_l', rsuffix='_r') """ Explanation: DataFrame.join方法也提供了lsuffix和rsuffix参数来实现同样的功能: End of explanation """ right2 = pd.DataFrame({'v': [7, 8, 9]}, index=['K1', 'K1', 'K2']) left.join([right, right2]) """ Explanation: 拼接多个DataFrame对象 DataFrame.join方法能够接收DataFrame对象构成的元组或列表,对列表内所有DF对象进行基于索引的拼接。Panel.join方法也有此功能。 End of explanation """ left = pd.DataFrame({'k': ['K0', 'K1', 'K1', 'K2'], 'lv': [1, 2, 3, 4], 's': ['a', 'b', 'c', 'd']}) right = pd.DataFrame({'k': ['K1', 'K2', 'K4'], 'rv': [1, 2, 3]}) left right pd.ordered_merge(left, right, fill_method='ffill', left_by='s') """ Explanation: 合并 有序数据 v0.18.0新增了一个ordered_merge()方法 对时间序列或者其他有序数据进行合并。并且ordered_merge提供了fill_method参数来填充缺失值。 End of explanation """ df1 = pd.DataFrame([[np.nan, 3., 5.], [-4.6, np.nan, np.nan], [np.nan, 7., np.nan]]) df1 df2 = pd.DataFrame([[-42.6, np.nan, -8.2], [-5., 1.6, 4]], index=[1, 2]) df2 """ Explanation: 使用合并方法填充NaN End of explanation """ df1.combine_first(df2) df1 """ Explanation: 先使用combine_first方法: End of explanation """ df1.update(df2) df1 """ Explanation: df1和df2列名完全相同,索引值也相似。 由于df2中不存在索引值=0,所以df1中第一行的值原封不动; 对于df1中不等于NaN的值,也不需要改变; 对于df1中等于NaN的值,用df2中同索引同列名的值替换, 得到结果。 注意combine_first()方法不改变df1的值。 如果想改变df1的值,使用update()方法。 End of explanation """
arsenovic/galgebra
examples/ipython/colored_christoffel_symbols.ipynb
bsd-3-clause
from __future__ import print_function import sys from galgebra.printer import Format, xpdf Format() from sympy import symbols, sin, pi, latex, Array, permutedims from galgebra.ga import Ga from IPython.display import Math """ Explanation: This example is kindly contributed by FreddyBaudine for reproducing pygae/galgebra#26 and pygae/galgebra#30 with modifications by utensil. Please note before Python code, there's an invisible markdown cell with the following code to enable color and define some colors from http://latexcolor.com/: markdown $$ \require{color} \definecolor{airforceblue}{rgb}{0.36, 0.54, 0.66} \definecolor{applegreen}{rgb}{0.55, 0.71, 0.0} \definecolor{atomictangerine}{rgb}{1.0, 0.6, 0.4} $$ $$ \require{color} \definecolor{airforceblue}{rgb}{0.36, 0.54, 0.66} \definecolor{applegreen}{rgb}{0.55, 0.71, 0.0} \definecolor{atomictangerine}{rgb}{1.0, 0.6, 0.4} $$ End of explanation """ from sympy import cos, sin, symbols g3coords = (x,y,z) = symbols('x y z') g3 = Ga('ex ey ez', g = [1,1,1], coords = g3coords,norm=False) # Create g3 (e_x,e_y,e_z) = g3.mv() Math(r'g =%s' % latex(g3.g)) """ Explanation: Base manifold (three dimensional) Metric tensor (cartesian coordinates - norm = False) End of explanation """ sp2coords = (theta, phi) = symbols(r'{\color{airforceblue}\theta} {\color{applegreen}\phi}', real = True) sp2param = [sin(theta)*cos(phi), sin(theta)*sin(phi), cos(theta)] sp2 = g3.sm(sp2param, sp2coords, norm = False) # submanifold (etheta, ephi) = sp2.mv() # sp2 basis vectors (rtheta, rphi) = sp2.mvr() # sp2 reciprocal basis vectors sp2grad = sp2.grad sph_map = [1, theta, phi] # Coordinate map for sphere of r = 1 Math(r'(\theta,\phi)\rightarrow (r,\theta,\phi) = %s' % latex(sph_map)) Math(r'e_\theta \cdot e_\theta = %s' % (etheta|etheta)) Math(r'e_\phi \cdot e_\phi = %s' % (ephi|ephi)) Math('g = %s' % latex(sp2.g)) Math(r'g^{-1} = %s' % latex(sp2.g_inv)) """ Explanation: Two dimensioanal submanifold - Unit sphere Basis not normalised End of 
explanation """ Cf1 = sp2.Christoffel_symbols(mode=1) Cf1 = permutedims(Array(Cf1), (2, 0, 1)) Math(r'\Gamma_{1, \alpha, \beta} = %s \quad \Gamma_{2, \alpha, \beta} = %s ' % (latex(Cf1[0, :, :]), latex(Cf1[1, :, :]))) Cf2 = sp2.Christoffel_symbols(mode=2) Cf2 = permutedims(Array(Cf2), (2, 0, 1)) Math(r'\Gamma^{1}_{\phantom{1,}\alpha, \beta} = %s \quad \Gamma^{2}_{\phantom{2,}\alpha, \beta} = %s ' % (latex(Cf2[0, :, :]), latex(Cf2[1, :, :]))) F = sp2.mv('F','vector',f=True) #scalar function f = sp2.mv('f','scalar',f=True) #vector function Math(r'\nabla = %s' % sp2grad) Math(r'\nabla f = %s' % (sp2.grad * f)) Math(r'F = %s' % F) Math(r'\nabla F = %s' % (sp2.grad * F)) """ Explanation: Christoffel symbols of the first kind: End of explanation """ cir_th = phi = symbols(r'{\color{atomictangerine}\phi}',real = True) cir_map = [pi/8, phi] Math(r'(\phi)\rightarrow (\theta,\phi) = %s' % latex(cir_map)) cir1d = sp2.sm( cir_map , (cir_th,), norm = False) # submanifold cir1dgrad = cir1d.grad (ephi) = cir1d.mv() Math(r'e_\phi \cdot e_\phi = %s' % latex(ephi[0] | ephi[0])) Math('g = %s' % latex(cir1d.g)) h = cir1d.mv('h','scalar',f= True) H = cir1d.mv('H','vector',f= True) Math(r'\nabla = %s' % cir1dgrad) Math(r'\nabla h = %s' %(cir1d.grad * h).simplify()) Math('H = %s' % H) Math(r'\nabla H = %s' % (cir1d.grad * H).simplify()) """ Explanation: One dimensioanal submanifold Basis not normalised End of explanation """
GoogleCloudPlatform/vertex-ai-samples
notebooks/community/sdk/SDK_Custom_Training_with_Unmanaged_Image_Dataset.ipynb
apache-2.0
!pip3 uninstall -y google-cloud-aiplatform !pip3 install google-cloud-aiplatform import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) """ Explanation: Feedback or issues? For any feedback or questions, please open an issue. Vertex SDK for Python: Custom Training Example with Unmanaged Image Dataset To use this Colaboratory notebook, you copy the notebook to your own Google Drive and open it with Colaboratory (or Colab). You can run each step, or cell, and see its results. To run a cell, use Shift+Enter. Colab automatically displays the return value of the last line in each cell. For more information about running notebooks in Colab, see the Colab welcome page. This notebook demonstrate how to create a custom model based on an image dataset. It will require you provide a bucket where the dataset will be stored. Note: you may incur charges for training, prediction, storage or usage of other GCP products in connection with testing this SDK. Install Vertex SDK for Python, Authenticate, and upload of a Dataset to your GCS bucket After the SDK installation the kernel will be automatically restarted. You may see this error message Your session crashed for an unknown reason which is normal. End of explanation """ import sys if "google.colab" in sys.modules: from google.colab import auth auth.authenticate_user() MY_PROJECT = "YOUR PROJECT" MY_STAGING_BUCKET = "gs://YOUR BUCKET" # bucket should be in same region as ucaip """ Explanation: Enter your project and GCS bucket Enter your Project Id in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. 
End of explanation """ from google.cloud import aiplatform aiplatform.init(project=MY_PROJECT, staging_bucket=MY_STAGING_BUCKET) """ Explanation: Initialize Vertex SDK for Python Initialize the client for Vertex AI End of explanation """ %%writefile training_script.py # Source: https://cloud.google.com/vertex-ai/docs/tutorials/image-recognition-custom import argparse import logging import os import tensorflow as tf import tensorflow_datasets as tfds IMG_WIDTH = 128 def normalize_img(image): """Normalizes image. * Resizes image to IMG_WIDTH x IMG_WIDTH pixels * Casts values from `uint8` to `float32` * Scales values from [0, 255] to [0, 1] Returns: A tensor with shape (IMG_WIDTH, IMG_WIDTH, 3). (3 color channels) """ image = tf.image.resize_with_pad(image, IMG_WIDTH, IMG_WIDTH) return image / 255. def normalize_img_and_label(image, label): """Normalizes image and label. * Performs normalize_img on image * Passes through label unchanged Returns: Tuple (image, label) where * image is a tensor with shape (IMG_WIDTH, IMG_WIDTH, 3). (3 color channels) * label is an unchanged integer [0, 4] representing flower type """ return normalize_img(image), label def get_args(): """Argument parser. Returns: Dictionary of arguments. """ parser = argparse.ArgumentParser(description='Flower classification sample') parser.add_argument( '--tfds', default=None, help='The tfds URI from https://www.tensorflow.org/datasets/ to load the data from') args = parser.parse_args() return args # Training settings args = get_args() if 'AIP_MODEL_DIR' not in os.environ: raise KeyError( 'The `AIP_MODEL_DIR` environment variable has not been' + 'set. 
See https://cloud.google.com/vertex-ai/docs/tutorials/image-recognition-custom/training' ) output_directory = os.environ['AIP_MODEL_DIR'] logging.info('Loading and preprocessing data ...') dataset = tfds.load(args.tfds, split='train', try_gcs=True, shuffle_files=True, as_supervised=True) dataset = dataset.map(normalize_img_and_label, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.cache() dataset = dataset.shuffle(1000) dataset = dataset.batch(128) dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) logging.info('Creating and training model ...') model = tf.keras.Sequential([ tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_WIDTH, IMG_WIDTH, 3)), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation="relu"), tf.keras.layers.Dense(5) # 5 classes ]) model.compile( optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) model.fit(dataset, epochs=10) logging.info(f'Exporting SavedModel to: {output_directory}') # Add softmax layer for intepretability probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()]) probability_model.save(output_directory) """ Explanation: Write your Training Script Write this cell as a file which will be used for custom training. Instead of using a managed dataset, a Tensorflow Dataset URI is passed in through the 'args' parameter of the 'run' function. The script will download the data from the URI at training time. 
End of explanation """ job = aiplatform.CustomTrainingJob( display_name="train-flowers-dist-1-replica", script_path="training_script.py", container_uri="gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest", requirements=["gcsfs==0.7.1"], model_serving_container_image_uri="gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest", ) model = job.run( args=["--tfds", "tf_flowers:3.*.*"], replica_count=1, model_display_name="flowers-model", ) """ Explanation: Launch a Training Job to Create a Model Once we have defined your training script, we will create a model. End of explanation """ endpoint = model.deploy(machine_type="n1-standard-4") """ Explanation: Deploy Your Model Deploy your model, then wait until the model FINISHES deployment before proceeding to prediction. End of explanation """ !gsutil -m cp -R gs://cloud-ml-data/img/flower_photos/daisy/14221848160_7f0a37c395.jpg . !gsutil -m cp -R gs://cloud-ml-data/img/flower_photos/tulips/13289268363_b9337d751e.jpg . !gsutil -m cp -R gs://cloud-ml-data/img/flower_photos/sunflowers/14623719696_1bb7970208_n.jpg . import numpy as np from PIL import Image daisy_floats = np.array(Image.open("14221848160_7f0a37c395.jpg")) small_image = np.array(Image.fromarray(np.uint8(daisy_floats)).resize((128, 128))) endpoint.predict(instances=[small_image.tolist()]) """ Explanation: Predict on the Endpoint To do a prediction you will need some flowers images. You can download some photos of flowers or use the ones provided below. End of explanation """