markdown
stringlengths
0
37k
code
stringlengths
1
33.3k
path
stringlengths
8
215
repo_name
stringlengths
6
77
license
stringclasses
15 values
Load the Temporal Difference Python class, TemporalDifferenceUtils():
%run ../TD0_Utils.py
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
Instantiate the class for the environment of interest:
TD0 = TemporalDifferenceUtils(env)
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
2b. SARSA: On-Policy TD(0) Control
# Define Number of Episodes
n_episodes = 3e+3

# e-greedy parameters to investigate
print('Determine the epsilon parameters for the epsilon-greedy policy...\n')
epsilons = np.array([0.1, 0.13, 0.16])
# BUGFIX: the format string was missing its '{}' placeholder, so the values
# were never actually printed.
print('epsilons: {}'.format(epsilons), '\n')

# various step-sizes (alpha) to try
print('Determine the step-sizes parameters (alphas) for the TD(0)...\n')
step_sizes = np.array([0.4])
print('step_sizes: {}'.format(step_sizes), '\n')

# Fixed discount
discount_fixed = 1

# Create a mesh-grid of trials (every epsilon paired with every step-size)
print('Create a dictionary of the RL-models of interest...\n')
epsilons, step_sizes = np.meshgrid(epsilons, step_sizes)
epsilons = epsilons.flatten()
step_sizes = step_sizes.flatten()

# Create a dictionary of the RL-trials of interest
RL_trials = {"baseline": {'epsilon': 0.1, 'step_size': 0.5, 'discount': 1}}
for n, (eps, alpha) in enumerate(zip(epsilons, step_sizes)):
    key = 'trial_' + str(n + 1)
    RL_trials[key] = {'epsilon': eps, 'step_size': alpha, 'discount': discount_fixed}
print('Number of RL-models to try: {}\n'.format(len(RL_trials)))
print('Let all RL-models to be trained for {0:,} episodes...\n'.format(int(n_episodes)))

rewards_per_trial_SARSA = OrderedDict((label, np.array([])) for label, _ in RL_trials.items())
q_values_per_trial_SARSA = OrderedDict((label, np.array([])) for label, _ in RL_trials.items())
for trial, params_dict in RL_trials.items():
    # Read out parameters from "params_dict"
    epsilon = params_dict['epsilon']
    step_size = params_dict['step_size']
    discount = params_dict['discount']
    # Apply SARSA [on-policy TD(0) Control]
    q_values, tot_rewards = TD0.sarsa_on_policy_control(env, n_episodes=n_episodes,
                                                        step_size=step_size,
                                                        discount=discount,
                                                        epsilon=epsilon)
    # Update "rewards_per_trial" and "q_values_per_trial" OrderedDicts
    rewards_per_trial_SARSA[trial] = tot_rewards
    q_values_per_trial_SARSA[trial] = q_values

title = 'Efficiency of the RL Method\n[SARSA on-policy TD(0) Control]'
plotutls.plot_learning_curve(rewards_per_trial_SARSA, title=title,
                             lower_reward_ratio=-100)
RL_trials
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
2c. Q-Learning: Off-Policy TD(0) Control
# Define Number of Episodes
# n_episodes = 2e+3

# e-greedy parameters to investigate
print('Determine the epsilon parameters for the epsilon-greedy policy...\n')
epsilons = np.array([0.1, 0.13, 0.16])
# BUGFIX: the format string was missing its '{}' placeholder.
print('epsilons: {}'.format(epsilons), '\n')

# various step-sizes (alpha) to try
print('Determine the step-sizes parameters (alphas) for the TD(0)...\n')
step_sizes = np.array([0.4])
print('step_sizes: {}'.format(step_sizes), '\n')

# Fixed discount
discount_fixed = 1

# Create a mesh-grid of trials
print('Create a dictionary of the RL-models of interest...\n')
epsilons, step_sizes = np.meshgrid(epsilons, step_sizes)
epsilons = epsilons.flatten()
step_sizes = step_sizes.flatten()

# Create a dictionary of the RL-trials of interest
RL_trials = {"baseline": {'epsilon': 0.1, 'step_size': 0.5, 'discount': 1}}
for n, (eps, alpha) in enumerate(zip(epsilons, step_sizes)):
    key = 'trial_' + str(n + 1)
    RL_trials[key] = {'epsilon': eps, 'step_size': alpha, 'discount': discount_fixed}
print('Number of RL-models to try: {}\n'.format(len(RL_trials)))
print('Let all RL-models to be trained for {0:,} episodes...\n'.format(int(n_episodes)))

rewards_per_trial_QL = OrderedDict((label, np.array([])) for label, _ in RL_trials.items())
q_values_per_trial_QL = OrderedDict((label, np.array([])) for label, _ in RL_trials.items())
for trial, params_dict in RL_trials.items():
    # Read out parameters from "params_dict"
    epsilon = params_dict['epsilon']
    step_size = params_dict['step_size']
    discount = params_dict['discount']
    # Apply Q-Learning [off-policy TD(0) Control]
    # (comment fixed: the original said "SARSA" but calls q_learning_off_policy)
    q_values, tot_rewards = TD0.q_learning_off_policy(env, n_episodes=n_episodes,
                                                      step_size=step_size,
                                                      discount=discount,
                                                      epsilon=epsilon)
    # Update "rewards_per_trial" and "q_values_per_trial" OrderedDicts
    rewards_per_trial_QL[trial] = tot_rewards
    q_values_per_trial_QL[trial] = q_values

title = 'Efficiency of the RL Method\n[Q-Learning off-policy TD(0) Control]'
plotutls.plot_learning_curve(rewards_per_trial_QL, title=title)
RL_trials
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
2d. On-Policy Expected SARSA
# Define Number of Episodes
# n_episodes = 2e+3

# e-greedy parameters to investigate
print('Determine the epsilon parameters for the epsilon-greedy policy...\n')
epsilons = np.array([0.1, 0.13, 0.16])
# BUGFIX: the format string was missing its '{}' placeholder.
print('epsilons: {}'.format(epsilons), '\n')

# various step-sizes (alpha) to try
print('Determine the step-sizes parameters (alphas) for the TD(0)...\n')
step_sizes = np.array([0.4])
print('step_sizes: {}'.format(step_sizes), '\n')

# Fixed discount
discount_fixed = 1

# Create a mesh-grid of trials
print('Create a dictionary of the RL-models of interest...\n')
epsilons, step_sizes = np.meshgrid(epsilons, step_sizes)
epsilons = epsilons.flatten()
step_sizes = step_sizes.flatten()

# Create a dictionary of the RL-trials of interest
RL_trials = {"baseline": {'epsilon': 0.1, 'step_size': 0.5, 'discount': 1}}
for n, (eps, alpha) in enumerate(zip(epsilons, step_sizes)):
    key = 'trial_' + str(n + 1)
    RL_trials[key] = {'epsilon': eps, 'step_size': alpha, 'discount': discount_fixed}
print('Number of RL-models to try: {}\n'.format(len(RL_trials)))
print('Let all RL-models to be trained for {0:,} episodes...\n'.format(int(n_episodes)))

rewards_per_trial_ExpSARSA = OrderedDict((label, np.array([])) for label, _ in RL_trials.items())
q_values_per_trial_ExpSARSA = OrderedDict((label, np.array([])) for label, _ in RL_trials.items())
for trial, params_dict in RL_trials.items():
    # Read out parameters from "params_dict"
    epsilon = params_dict['epsilon']
    step_size = params_dict['step_size']
    discount = params_dict['discount']
    # Apply Expected SARSA [on-policy TD(0) Control]
    # (comment fixed: the original said "SARSA" but calls expected_sarsa_on_policy)
    q_values, tot_rewards = TD0.expected_sarsa_on_policy(env, n_episodes=n_episodes,
                                                         step_size=step_size,
                                                         discount=discount,
                                                         epsilon=epsilon)
    # Update "rewards_per_trial" and "q_values_per_trial" OrderedDicts
    rewards_per_trial_ExpSARSA[trial] = tot_rewards
    q_values_per_trial_ExpSARSA[trial] = q_values

title = 'Efficiency of the RL Method\n[Expected SARSA on-policy TD(0) Control]'
plotutls.plot_learning_curve(rewards_per_trial_ExpSARSA, title=title)
RL_trials
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
3. Double Learning: a method to mitigate maximization bias 3a. Double SARSA: On-Policy TD(0) Control
# Define Number of Episodes
# n_episodes = 2e+3

# e-greedy parameters to investigate
print('Determine the epsilon parameters for the epsilon-greedy policy...\n')
epsilons = np.array([0.1, 0.13, 0.16])
# BUGFIX: the format string was missing its '{}' placeholder.
print('epsilons: {}'.format(epsilons), '\n')

# various step-sizes (alpha) to try
print('Determine the step-sizes parameters (alphas) for the TD(0)...\n')
step_sizes = np.array([0.4])
print('step_sizes: {}'.format(step_sizes), '\n')

# Fixed discount
discount_fixed = 1

# Create a mesh-grid of trials
print('Create a dictionary of the RL-models of interest...\n')
epsilons, step_sizes = np.meshgrid(epsilons, step_sizes)
epsilons = epsilons.flatten()
step_sizes = step_sizes.flatten()

# Create a dictionary of the RL-trials of interest
RL_trials = {"baseline": {'epsilon': 0.1, 'step_size': 0.5, 'discount': 1}}
for n, (eps, alpha) in enumerate(zip(epsilons, step_sizes)):
    key = 'trial_' + str(n + 1)
    RL_trials[key] = {'epsilon': eps, 'step_size': alpha, 'discount': discount_fixed}
print('Number of RL-models to try: {}\n'.format(len(RL_trials)))
print('Let all RL-models to be trained for {0:,} episodes...\n'.format(int(n_episodes)))

rewards_per_trial_DSARSA = OrderedDict((label, np.array([])) for label, _ in RL_trials.items())
q_values_per_trial_DSARSA = OrderedDict((label, np.array([])) for label, _ in RL_trials.items())
for trial, params_dict in RL_trials.items():
    # Read out parameters from "params_dict"
    epsilon = params_dict['epsilon']
    step_size = params_dict['step_size']
    discount = params_dict['discount']
    # Apply Double SARSA [on-policy TD(0) Control with double learning]
    q_values_1, tot_rewards = TD0.sarsa_on_policy_control(env, n_episodes=n_episodes,
                                                          step_size=step_size,
                                                          discount=discount,
                                                          epsilon=epsilon,
                                                          double_learning=True)
    # Update "rewards_per_trial" and "q_values_per_trial" OrderedDicts
    rewards_per_trial_DSARSA[trial] = tot_rewards
    q_values_per_trial_DSARSA[trial] = q_values_1

title = 'Efficiency of the RL Method\n[Double SARSA: on-policy TD(0) Control]'
plotutls.plot_learning_curve(rewards_per_trial_DSARSA, title=title)
RL_trials
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
3b. Double Q-Learning: Off-Policy TD(0) Control
# Define Number of Episodes
# n_episodes = 2e+3

# e-greedy parameters to investigate
print('Determine the epsilon parameters for the epsilon-greedy policy...\n')
epsilons = np.array([0.1, 0.13, 0.16])
# BUGFIX: the format string was missing its '{}' placeholder.
print('epsilons: {}'.format(epsilons), '\n')

# various step-sizes (alpha) to try
print('Determine the step-sizes parameters (alphas) for the TD(0)...\n')
step_sizes = np.array([0.4])
print('step_sizes: {}'.format(step_sizes), '\n')

# Fixed discount
discount_fixed = 1

# Create a mesh-grid of trials
print('Create a dictionary of the RL-models of interest...\n')
epsilons, step_sizes = np.meshgrid(epsilons, step_sizes)
epsilons = epsilons.flatten()
step_sizes = step_sizes.flatten()

# Create a dictionary of the RL-trials of interest
RL_trials = {"baseline": {'epsilon': 0.1, 'step_size': 0.5, 'discount': 1}}
for n, (eps, alpha) in enumerate(zip(epsilons, step_sizes)):
    key = 'trial_' + str(n + 1)
    RL_trials[key] = {'epsilon': eps, 'step_size': alpha, 'discount': discount_fixed}
print('Number of RL-models to try: {}\n'.format(len(RL_trials)))
print('Let all RL-models to be trained for {0:,} episodes...\n'.format(int(n_episodes)))

rewards_per_trial_DQL = OrderedDict((label, np.array([])) for label, _ in RL_trials.items())
q_values_per_trial_DQL = OrderedDict((label, np.array([])) for label, _ in RL_trials.items())
for trial, params_dict in RL_trials.items():
    # Read out parameters from "params_dict"
    epsilon = params_dict['epsilon']
    step_size = params_dict['step_size']
    discount = params_dict['discount']
    # Apply Double Q-Learning [off-policy TD(0) Control with double learning]
    # (comment fixed: the original said "SARSA" but calls q_learning_off_policy)
    q_values, tot_rewards = TD0.q_learning_off_policy(env, n_episodes=n_episodes,
                                                      step_size=step_size,
                                                      discount=discount,
                                                      epsilon=epsilon,
                                                      double_learning=True)
    # Update "rewards_per_trial" and "q_values_per_trial" OrderedDicts
    rewards_per_trial_DQL[trial] = tot_rewards
    q_values_per_trial_DQL[trial] = q_values

title = 'Efficiency of the RL Method\n[Double Q-Learning: off-policy TD(0) Control]'
plotutls.plot_learning_curve(rewards_per_trial_DQL, title=title, lower_reward_ratio=-100)
RL_trials
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
3c. Double Expected SARSA: On-Policy TD(0) Control
# Define Number of Episodes
# n_episodes = 2e+3

# e-greedy parameters to investigate
print('Determine the epsilon parameters for the epsilon-greedy policy...\n')
epsilons = np.array([0.1, 0.13, 0.16])
# BUGFIX: the format string was missing its '{}' placeholder.
print('epsilons: {}'.format(epsilons), '\n')

# various step-sizes (alpha) to try
print('Determine the step-sizes parameters (alphas) for the TD(0)...\n')
step_sizes = np.array([0.4])
print('step_sizes: {}'.format(step_sizes), '\n')

# Fixed discount
discount_fixed = 1

# Create a mesh-grid of trials
print('Create a dictionary of the RL-models of interest...\n')
epsilons, step_sizes = np.meshgrid(epsilons, step_sizes)
epsilons = epsilons.flatten()
step_sizes = step_sizes.flatten()

# Create a dictionary of the RL-trials of interest
RL_trials = {"baseline": {'epsilon': 0.1, 'step_size': 0.5, 'discount': 1}}
for n, (eps, alpha) in enumerate(zip(epsilons, step_sizes)):
    key = 'trial_' + str(n + 1)
    RL_trials[key] = {'epsilon': eps, 'step_size': alpha, 'discount': discount_fixed}
print('Number of RL-models to try: {}\n'.format(len(RL_trials)))
print('Let all RL-models to be trained for {0:,} episodes...\n'.format(int(n_episodes)))

rewards_per_trial_DExpSARSA = OrderedDict((label, np.array([])) for label, _ in RL_trials.items())
q_values_per_trial_DExpSARSA = OrderedDict((label, np.array([])) for label, _ in RL_trials.items())
for trial, params_dict in RL_trials.items():
    # Read out parameters from "params_dict"
    epsilon = params_dict['epsilon']
    step_size = params_dict['step_size']
    discount = params_dict['discount']
    # Apply Double Expected SARSA [on-policy TD(0) Control with double learning]
    q_values, tot_rewards = TD0.expected_sarsa_on_policy(env, n_episodes=n_episodes,
                                                         step_size=step_size,
                                                         discount=discount,
                                                         epsilon=epsilon,
                                                         double_learning=True)
    # Update "rewards_per_trial" and "q_values_per_trial" OrderedDicts
    rewards_per_trial_DExpSARSA[trial] = tot_rewards
    q_values_per_trial_DExpSARSA[trial] = q_values

title = 'Efficiency of the RL Method\n[Double Expected SARSA: on-policy TD(0) Control]'
# BUGFIX: the original plotted rewards_per_trial_DQL (copy-paste from the
# Double Q-Learning cell) under the Double Expected SARSA title.
plotutls.plot_learning_curve(rewards_per_trial_DExpSARSA, title=title, lower_reward_ratio=-50)
RL_trials
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
4. Comparison of SARSA, Q-Learning and Expected SARSA best models After an initial transient, Q-learning learns values for the optimal policy, the one that travels right along the edge of the cliff. Unfortunately, this occasionally results in the traveller falling off the cliff, because of the $\varepsilon$-greedy action selection. SARSA, on the other hand, takes the action selection into account and learns the longer but safer path, through the upper part of the grid. Although Q-learning actually learns the values of the optimal policy, its online performance is worse than that of SARSA, which learns the roundabout policy.
winning_trial = 'baseline'

# Collect the rewards of the winning trial from every method in one mapping.
# (The original pre-filled the OrderedDict with empty arrays and then
# immediately overwrote every entry, and assigned the identical title twice —
# both redundancies removed; the displayed plots are unchanged.)
rewards_per_trial_best_models = OrderedDict([
    ('Model_SARSA', rewards_per_trial_SARSA[winning_trial]),
    ('Model_DSARSA', rewards_per_trial_DSARSA[winning_trial]),
    ('Model_QL', rewards_per_trial_QL[winning_trial]),
    ('Model_DQL', rewards_per_trial_DQL[winning_trial]),
    ('Model_ExpSARSA', rewards_per_trial_ExpSARSA[winning_trial]),
    ('Model_DExpSARSA', rewards_per_trial_DExpSARSA[winning_trial]),
])

title = 'Efficiency of the RL Method\n[SARSA vs Q-Learning and Expected SARSA Winning Models]'
# Plot the same learning curves at two zoom levels (reward clipped below the ratio)
plotutls.plot_learning_curve(rewards_per_trial_best_models, title=title, lower_reward_ratio=-100)
plotutls.plot_learning_curve(rewards_per_trial_best_models, title=title, lower_reward_ratio=-35)
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
Note: In "CliffWalking-v0" environment the traveler can choose one of the below actions as she navigates through the grid: - "UP": denoted by 0 - "RIGHT": denoted by 1 - "DOWN": denoted by 2 - "LEFT": denoted by 3
# print optimal policy
def print_optimal_policy(q_values, grid_height=4, grid_width=12):
    """Print the greedy policy encoded in ``q_values`` as a grid of arrows.

    :param q_values: flat array of action-values; it is reshaped to
        (grid_height, grid_width, 4), with the action axis ordered
        UP, RIGHT, DOWN, LEFT (the "CliffWalking-v0" encoding 0..3).
    :param grid_height: number of grid rows (default 4)
    :param grid_width: number of grid columns (default 12)
    """
    # Arrow glyph per action index — replaces the original actions_dict +
    # if/elif chain with a direct lookup (same action order: UP, RIGHT, DOWN, LEFT).
    arrows = ['\U00002191', '\U00002192', '\U00002193', '\U00002190']
    # Position of the target destination (fixed, as in the original)
    GOAL = [3, 11]
    # Reshape the "q_values" table to follow grid-world dimensionality
    q_values = q_values.reshape((grid_height, grid_width, len(arrows)))
    optimal_policy = []
    for i in range(grid_height):
        row = []
        for j in range(grid_width):
            if [i, j] == GOAL:
                row.append('G')
            else:
                row.append(arrows[np.argmax(q_values[i, j, :])])
        optimal_policy.append(row)
    for row in optimal_policy:
        print(*row)
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
5. Learned Policies SARSA on-Policy TD(0) Control: Winning trial:
winning_trial = 'baseline' print_optimal_policy(q_values_per_trial_SARSA[winning_trial], grid_height=4, grid_width=12)
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
Double SARSA on-Policy TD(0) Control:
print_optimal_policy(q_values_per_trial_DSARSA[winning_trial], grid_height=4, grid_width=12)
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
Q-Learning off-Policy TD(0) Control:
print_optimal_policy(q_values_per_trial_QL[winning_trial], grid_height=4, grid_width=12)
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
Double Q-Learning off-Policy TD(0) Control:
print_optimal_policy(q_values_per_trial_DQL[winning_trial], grid_height=4, grid_width=12)
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
Expected SARSA on-Policy TD(0) Control:
print_optimal_policy(q_values_per_trial_ExpSARSA[winning_trial], grid_height=4, grid_width=12)
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
Double Expected SARSA on-Policy TD(0) Control:
print_optimal_policy(q_values_per_trial_DExpSARSA[winning_trial], grid_height=4, grid_width=12)
Reinforcement-Learning/TD0-models/02.CliffWalking.ipynb
tgrammat/ML-Data_Challenges
apache-2.0
Iris introduction course 5. Cube Plotting Learning Outcome: by the end of this section, you will be able to visualise the data stored in Iris Cubes. Duration: 30 mins Overview:<br> 5.1 Plotting Data<br> 5.2 Maps with cartopy<br> 5.3 Exercise<br> 5.4 Summary of the Section Setup
import iris
course_content/iris_course/5.Cube_Plotting.ipynb
SciTools/courses
gpl-3.0
5.1 Plotting data<a id='plotting'></a> Iris comes with two plotting modules called iris.plot and iris.quickplot that "wrap" some of the common matplotlib plotting functions, such that cubes can be passed as input rather than the usual NumPy arrays. The Iris plot routines will also pass on any other arguments and keywords to the underlying matplotlib methods. The 'plot' and 'quickplot' modules are very similar, with the primary difference being that quickplot will add extra information to the axes, such as: a colorbar, labels for the x and y axes, and a title where possible.
import iris.plot as iplt
import iris.quickplot as qplt
import matplotlib.pyplot as plt

# Load the sample cube and take a 1D slice of it:
# last index on the leading dimension, row 20 on the next.
airtemps = iris.load_cube(iris.sample_data_path('A1B_north_america.nc'))
timeseries = airtemps[-1, 20, ...]
print(timeseries)

# quickplot adds axis labels and a title automatically where possible
qplt.plot(timeseries)
plt.show()
course_content/iris_course/5.Cube_Plotting.ipynb
SciTools/courses
gpl-3.0
<div class="alert alert-block alert-warning"> <b><font color="brown">Exercise: </font></b> <p>Compare the effects of <b><font face='courier'>iplt.plot</font></b> next to <b><font face='courier'>qplt.plot</font></b> for the above data. <br>What is the visible difference?</p> </div>
# # edit space for user code ... # # SAMPLE SOLUTION # %load solutions/iris_exercise_5.1a
course_content/iris_course/5.Cube_Plotting.ipynb
SciTools/courses
gpl-3.0
Notice that, although the result of qplt has axis labels and a title, everything else about the axes is identical. The plotting functions in Iris have strict rules on the dimensionality of the inputted cubes. For example, a 2d cube will be needed in order to create a contour plot. <div class="alert alert-block alert-warning"> <b><font color="brown">Exercise: </font></b> <p>What happens if you try to apply the '<b><font face='courier'>qplt.contourf</font></b>' plot method to the 'airtemps' cube (i.e. the <i>whole</i> cube) ?</p> </div>
# # edit space for user code ... # # SAMPLE SOLUTION # %load solutions/iris_exercise_5.1b
course_content/iris_course/5.Cube_Plotting.ipynb
SciTools/courses
gpl-3.0
<div class="alert alert-block alert-warning"> <b><font color="brown">Exercise: </font></b> <p>How can you extract a 2-dimensional section of this data, to make a useful contour plot?</p> </div>
# # edit space for user code ... # # SAMPLE SOLUTION # %load solutions/iris_exercise_5.1c
course_content/iris_course/5.Cube_Plotting.ipynb
SciTools/courses
gpl-3.0
A useful alternative to contouring is to make a colour 'blockplot', which colours in each datapoint rather than drawing contours. This works well where contours would be too dense and complicated, or if you need to look at every point in the data. In matplotlib, the <a href="https://matplotlib.org/api/_as_gen/matplotlib.pyplot.pcolormesh.html"><b><font face="courier">plt.pcolormesh</font></b></a> method does this. <div class="alert alert-block alert-warning"> <b><font color="brown">Exercise: </font></b> <p>Plot the Iris equivalent of the colour blockplot method <a href="https://matplotlib.org/api/_as_gen/matplotlib.pyplot.pcolormesh.html">matplotlib.pyplot.pcolormesh</a> for the first timestep of the 'airtemps' data, i.e. <b><font face="courier" color="black">airtemps[0]</font></b>. <br>Plot just a small region, so you can see the individual data points.</p> </div>
# # edit space for user code ... # # SAMPLE SOLUTION # %load solutions/iris_exercise_5.1d
course_content/iris_course/5.Cube_Plotting.ipynb
SciTools/courses
gpl-3.0
Almost all the Iris plot methods have both iplt and qplt versions. Also, most of these have the same names as similar methods in matplotlib.pyplot. 5.2 Maps with cartopy <a id='maps'></a> When the result of a plot operation is a map, Iris will automatically create an appropriate cartopy axes if one doesn't already exist. We can use matplotlib's gca() function to get hold of the automatically created cartopy axes:
import cartopy.crs as ccrs

plt.figure(figsize=(12, 8))

# Left panel: let Iris create the map axes automatically, then grab them
# with gca() to draw coastlines.
plt.subplot(1, 2, 1)
qplt.contourf(airtemps[0, ...], 25)
ax = plt.gca()
ax.coastlines()

# Right panel: supply an explicit cartopy projection for the axes instead.
ax = plt.subplot(1, 2, 2, projection=ccrs.RotatedPole(100, 37))
qplt.contourf(airtemps[0, ...], 25)
ax.coastlines()

plt.show()
course_content/iris_course/5.Cube_Plotting.ipynb
SciTools/courses
gpl-3.0
5.3 Section Review Exercise <a id='exercise'></a> Use the above cube, with appropriate indexing, to produce the following: 1. a contourf map on a LambertConformal projection (with coastlines)
# space for user code ... # SAMPLE SOLUTION # %load solutions/iris_exercise_5.3a
course_content/iris_course/5.Cube_Plotting.ipynb
SciTools/courses
gpl-3.0
2. a block plot (pcolormesh) map in its native projection (with coastlines)
# space for user code ... # SAMPLE SOLUTION # %load solutions/iris_exercise_5.3b
course_content/iris_course/5.Cube_Plotting.ipynb
SciTools/courses
gpl-3.0
3. a scatter plot showing air_temperature vs longitude (hint: the inputs to scatter can be a combination of coordinates or 1D cubes)
# space for user code ... # SAMPLE SOLUTION # %load solutions/iris_exercise_5.3c
course_content/iris_course/5.Cube_Plotting.ipynb
SciTools/courses
gpl-3.0
<img src="image/mean_variance.png" style="height: 75%;width: 75%; position: relative; right: 5%"> Problem 1 The first problem involves normalizing the features for your training and test data. Implement Min-Max scaling in the normalize() function to a range of a=0.1 and b=0.9. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9. Since the raw notMNIST image data is in grayscale, the current values range from a min of 0 to a max of 255. Min-Max Scaling: $ X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}} $ If you're having trouble solving problem 1, you can view the solution here.
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data, a=0.1, b=0.9, x_min=0, x_max=255):
    """
    Normalize the image data with Min-Max scaling to a range of [a, b].

    The defaults reproduce the original behaviour: 8-bit grayscale values
    in [0, 255] are mapped to [0.1, 0.9].  The bounds are now parameters
    (backward compatible) so the same helper can rescale other data.

    :param image_data: The image data to be normalized
    :param a: lower bound of the target range
    :param b: upper bound of the target range
    :param x_min: minimum possible raw value
    :param x_max: maximum possible raw value
    :return: Normalized image data
    """
    return a + ((image_data - x_min) * (b - a)) / (x_max - x_min)


# Test Cases
np.testing.assert_array_almost_equal(
    normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
    [0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412,
     0.121960784314, 0.125098039216, 0.128235294118, 0.13137254902, 0.9],
    decimal=3)
np.testing.assert_array_almost_equal(
    normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254, 255])),
    [0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157,
     0.865490196078, 0.896862745098, 0.9])

if not is_features_normal:
    train_features = normalize_grayscale(train_features)
    test_features = normalize_grayscale(test_features)
    is_features_normal = True
print('Tests Passed!')

if not is_labels_encod:
    # Turn labels into numbers and apply One-Hot Encoding
    encoder = LabelBinarizer()
    encoder.fit(train_labels)
    train_labels = encoder.transform(train_labels)
    test_labels = encoder.transform(test_labels)
    # Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
    train_labels = train_labels.astype(np.float32)
    test_labels = test_labels.astype(np.float32)
    is_labels_encod = True
print('Labels One-Hot Encoded')

assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'

# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
    train_features, train_labels, test_size=0.05, random_state=832289)
print('Training features and labels randomized and split.')

# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
    print('Saving data to pickle file...')
    try:
        # Use the pickle_file variable (the original hard-coded the same
        # literal a second time here).
        with open(pickle_file, 'wb') as pfile:
            pickle.dump(
                {
                    'train_dataset': train_features,
                    'train_labels': train_labels,
                    'valid_dataset': valid_features,
                    'valid_labels': valid_labels,
                    'test_dataset': test_features,
                    'test_labels': test_labels,
                },
                pfile, pickle.HIGHEST_PROTOCOL)
    except Exception as e:
        print('Unable to save data to', pickle_file, ':', e)
        raise

print('Data cached in pickle file.')
1. Computer Vision and Deep Learning/L1 TensorFlow Lab/lab.ipynb
egillanton/Udacity-SDCND
mit
<img src="image/weight_biases.png" style="height: 60%;width: 60%; position: relative; right: 10%"> Problem 2 For the neural network to train on your data, you need the following <a href="https://www.tensorflow.org/resources/dims_types.html#data-types">float32</a> tensors: - features - Placeholder tensor for feature data (train_features/valid_features/test_features) - labels - Placeholder tensor for label data (train_labels/valid_labels/test_labels) - weights - Variable Tensor with random numbers from a truncated normal distribution. - See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#truncated_normal">tf.truncated_normal() documentation</a> for help. - biases - Variable Tensor with all zeros. - See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#zeros"> tf.zeros() documentation</a> for help. If you're having trouble solving problem 2, review "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available here.
features_count = 784 labels_count = 10 # Set the features and labels tensors features = tf.placeholder(tf.float32, [None, features_count]) labels = tf.placeholder(tf.float32, [None, labels_count]) # Set the weights and biases tensors weights = tf.Variable(tf.random_normal([features_count, labels_count])) biases = tf.Variable(tf.zeros(labels_count)) ### DON'T MODIFY ANYTHING BELOW ### #Test Cases from tensorflow.python.ops.variables import Variable assert features._op.name.startswith('Placeholder'), 'features must be a placeholder' assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder' assert isinstance(weights, Variable), 'weights must be a TensorFlow variable' assert isinstance(biases, Variable), 'biases must be a TensorFlow variable' assert features._shape == None or (\ features._shape.dims[0].value is None and\ features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect' assert labels._shape == None or (\ labels._shape.dims[0].value is None and\ labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect' assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect' assert biases._variable._shape == (10), 'The shape of biases is incorrect' assert features._dtype == tf.float32, 'features must be type float32' assert labels._dtype == tf.float32, 'labels must be type float32' # Feed dicts for training, validation, and test session train_feed_dict = {features: train_features, labels: train_labels} valid_feed_dict = {features: valid_features, labels: valid_labels} test_feed_dict = {features: test_features, labels: test_labels} # Linear Function WX + b logits = tf.matmul(features, weights) + biases prediction = tf.nn.softmax(logits) # Cross entropy cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), axis=1) # Training loss loss = tf.reduce_mean(cross_entropy) # Create an operation that initializes all variables init = tf.global_variables_initializer() # Test Cases with 
tf.Session() as session: session.run(init) session.run(loss, feed_dict=train_feed_dict) session.run(loss, feed_dict=valid_feed_dict) session.run(loss, feed_dict=test_feed_dict) biases_data = session.run(biases) assert not np.count_nonzero(biases_data), 'biases must be zeros' print('Tests Passed!') # Determine if the predictions are correct is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1)) # Calculate the accuracy of the predictions accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32)) print('Accuracy function created.')
1. Computer Vision and Deep Learning/L1 TensorFlow Lab/lab.ipynb
egillanton/Udacity-SDCND
mit
<img src="image/learn_rate_tune.png" style="height: 60%;width: 60%"> Problem 3 Below are 3 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best acccuracy. Parameter configurations: Configuration 1 * Epochs: 1 * Batch Size: * 2000 * 1000 * 500 * 300 * 50 * Learning Rate: 0.01 Configuration 2 * Epochs: 1 * Batch Size: 100 * Learning Rate: * 0.8 * 0.5 * 0.1 * 0.05 * 0.01 Configuration 3 * Epochs: * 1 * 2 * 3 * 4 * 5 * Batch Size: 100 * Learning Rate: 0.2 The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed. If you're having trouble solving problem 3, you can view the solution here.
# Train the network under the chosen hyper-parameter configuration and plot
# loss/accuracy curves. Relies on notebook state defined in earlier cells:
# tf, math, tqdm, plt, features, labels, loss, accuracy, init,
# train_features, train_labels, train_feed_dict, valid_feed_dict.
# NOTE(review): written against the TensorFlow 1.x session API — confirm
# the runtime still provides tf.Session / tf.train.GradientDescentOptimizer.

# Find the best parameters for each configuration
# Configuration 1
# epochs = 1
# batch_size = 50
# learning_rate = 0.01

# Configuration 2
# epochs = 1
# batch_size = 100
# learning_rate = 0.1

# Configuration 3
epochs = 5
batch_size = 100
learning_rate = 0.2

### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

# The accuracy measured against the validation set
validation_accuracy = 0.0

# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []

with tf.Session() as session:
    session.run(init)
    batch_count = int(math.ceil(len(train_features)/batch_size))

    for epoch_i in range(epochs):
        # Progress bar
        batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')

        # The training cycle
        for batch_i in batches_pbar:
            # Get a batch of training features and labels
            batch_start = batch_i*batch_size
            batch_features = train_features[batch_start:batch_start + batch_size]
            batch_labels = train_labels[batch_start:batch_start + batch_size]

            # Run optimizer and get loss
            _, l = session.run(
                [optimizer, loss],
                feed_dict={features: batch_features, labels: batch_labels})

            # Log every 50 batches
            if not batch_i % log_batch_step:
                # Calculate Training and Validation accuracy
                training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
                validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)

                # Log batches
                previous_batch = batches[-1] if batches else 0
                batches.append(log_batch_step + previous_batch)
                loss_batch.append(l)
                train_acc_batch.append(training_accuracy)
                valid_acc_batch.append(validation_accuracy)

    # Check accuracy against Validation data
    validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)

# Plot the training loss (top) and train/validation accuracy (bottom).
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()

print('Validation accuracy at {}'.format(validation_accuracy))
1. Computer Vision and Deep Learning/L1 TensorFlow Lab/lab.ipynb
egillanton/Udacity-SDCND
mit
Test Set the epochs, batch_size, and learning_rate with the best learning parameters you discovered in problem 3. You're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.
# Set the epochs, batch_size, and learning_rate with the best parameters from problem 3 epochs = 5 batch_size = 30 learning_rate = 0.1 # Test Accuracy is 0.8722000122070312 ### DON'T MODIFY ANYTHING BELOW ### # The accuracy measured against the test set test_accuracy = 0.0 with tf.Session() as session: session.run(init) batch_count = int(math.ceil(len(train_features)/batch_size)) for epoch_i in range(epochs): # Progress bar batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches') # The training cycle for batch_i in batches_pbar: # Get a batch of training features and labels batch_start = batch_i*batch_size batch_features = train_features[batch_start:batch_start + batch_size] batch_labels = train_labels[batch_start:batch_start + batch_size] # Run optimizer _ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels}) # Check accuracy against Test data test_accuracy = session.run(accuracy, feed_dict=test_feed_dict) assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy) print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
1. Computer Vision and Deep Learning/L1 TensorFlow Lab/lab.ipynb
egillanton/Udacity-SDCND
mit
Mauricio's 1D problem in 2D We first define the model.
M = 50 m = np.zeros((M+1, 1)) # Have to do +1 because we're going to lose one in computing RC series. m[10:15,:] = 1.0 m[15:27,:] = -0.3 m[27:35,:] = 2.1 m = (m[1:] - m[:-1]) / (m[1:] + m[:-1] + 1e-9) # Small number avoid division by zero. m = np.repeat(m, 50, axis=-1) plt.imshow(m, cmap='viridis')
NumPy_2D.ipynb
kwinkunks/axb
apache-2.0
Form the discrete kernel, G.
N = 20 L = 100 alpha = 0.08 x = np.arange(0, M, 1) * L/(M-1) dx = L/(M-1) r = np.arange(0, N, 1) * L/(N-1) G = np.zeros((N, M)) for j in range(M): for k in range(N): G[k,j] = dx * np.exp(-alpha * np.abs(r[k] - x[j])**2)
NumPy_2D.ipynb
kwinkunks/axb
apache-2.0
Compute the data; this is the forward problem.
d = G @ m plt.imshow(d, cmap='viridis')
NumPy_2D.ipynb
kwinkunks/axb
apache-2.0
Minimum least squares solution:
m_est = G.T @ la.inv(G @ G.T) @ d d_pred = G @ m_est plot_all(m, d, m_est, d_pred)
NumPy_2D.ipynb
kwinkunks/axb
apache-2.0
We can also just use NumPy's solver directly.
m_est = la.lstsq(G, d)[0] d_pred = G @ m_est plot_all(m, d, m_est, d_pred)
NumPy_2D.ipynb
kwinkunks/axb
apache-2.0
More complex models
from io import BytesIO import requests from urllib.parse import quote text = '+ =' url = "https://chart.googleapis.com/chart" params = {'chst': 'd_text_outline', 'chld': '000000|36|h|000000|_|{}'.format(text), } r = requests.get(url, params) b = BytesIO(r.content) img = mpimg.imread(b) m = np.pad(img[...,3], 20, 'constant') plt.imshow(m, cmap='viridis') m = (m[1:] - m[:-1]) / (m[1:] + m[:-1] + 1e-9) # Small number avoid division by zero.
NumPy_2D.ipynb
kwinkunks/axb
apache-2.0
I changed the shape of m so we have to make a new G.
M = m.shape[0] N = 20 L = 100 alpha = 0.08 x = np.arange(0, M, 1) * L/(M-1) dx = L/(M-1) r = np.arange(0, N, 1) * L/(N-1) G = np.zeros((N, M)) for j in range(M): for k in range(N): G[k,j] = dx * np.exp(-alpha * np.abs(r[k] - x[j])**2)
NumPy_2D.ipynb
kwinkunks/axb
apache-2.0
Forward model the data.
d = G @ m plt.imshow(d, cmap='viridis')
NumPy_2D.ipynb
kwinkunks/axb
apache-2.0
Noise-free: minimum norm
m_est = G.T @ la.inv(G @ G.T) @ d d_pred = G @ m_est plot_all(m, d, m_est, d_pred)
NumPy_2D.ipynb
kwinkunks/axb
apache-2.0
Solve with LAPACK
m_est = la.lstsq(G, d)[0] d_pred = G @ m_est plot_all(m, d, m_est, d_pred)
NumPy_2D.ipynb
kwinkunks/axb
apache-2.0
With noise: damped least squares
s = 1 d += s * np.random.random(d.shape) I = np.eye(N) µ = 2.5 m_est = G.T @ la.inv(G @ G.T + µ * I) @ d d_pred = G @ m_est plot_all(m, d, m_est, d_pred)
NumPy_2D.ipynb
kwinkunks/axb
apache-2.0
With noise: damped least squares with first derivative regularization
W = convmtx([1,-1], M)[:,:-1] # Skip last column
NumPy_2D.ipynb
kwinkunks/axb
apache-2.0
Selecting k top scoring features (also dimensionality reduction) Considered unsupervised learning
# SelectKBest for selecting top-scoring features from sklearn import datasets from sklearn.feature_selection import SelectKBest, chi2 iris = datasets.load_iris() X, y = iris.data, iris.target print(X.shape) # Do feature selection # input is scoring function (here chi2) to get univariate p-values # and number of top-scoring features (k) - here we get the top 2 X_t = SelectKBest(chi2, k = 2).fit_transform(X, y) print(X_t.shape)
03.Feature Engineering.ipynb
karthikrangarajan/intro-to-sklearn
bsd-3-clause
Als Lösung ("Fixpunkt"), bei dem linke und rechte Seite denselben Wert annehmen lässt sich für $\lambda$ ein Wert von etwa 0,0085 ablesen. Mithilfe einer Fixpunktiteration lässt sich dieser auch iterativ bestimmen. Hierzu formen wir die Prandtl-Formel so um, dass auf der linken Seite nur noch $\lambda$ steht: $$\lambda = \frac{1}{\left[RHS\left(\lambda\right)\right]^2}$$ Damit lässt sich nun eine Iterationsvorschrift formulieren: $$\lambda_{i+1} = \frac{1}{\left[RHS\left(\lambda_i\right)\right]^2}$$
# Startwert für lambda: lamb_alt = 100 # Liste, um Zwischenergebnisse zu speichern lambda_i = [] lambda_i.append(lamb_alt) # Fixpunkt-Algorithmus for iteration in range(0, 5): lamb_neu = 1 / (RHS(lamb_alt, Re)**2) lambda_i.append(lamb_neu) lamb_alt = lamb_neu fehler = (RHS(lamb_neu, Re)-LHS(lamb_neu)) / RHS(lamb_neu, Re) plt.plot(lamb, LHS(lamb)) plt.plot(lamb, RHS(lamb, Re)) plt.plot(lambda_i, LHS(lambda_i), 'o') for i, txt in enumerate(lambda_i): plt.annotate(i, (lambda_i[i], LHS(lambda_i[i]))) plt.axis([0, 0.02, 5, 15]) plt.ylabel('LHS, RHS') plt.xlabel('Rohrreibungszahl $\lambda$') plt.show(); print ("lambda = ", lamb_neu, ", Fehler in %: ", fehler*100)
3_1-Numerik_Iterative_Verfahren.ipynb
HsKA-ThermalFluiddynamics/NSS-1
mit
Newton-Verfahren Das Newton-Verfahren ist eine weitere Möglichkeit, um Gleichungen iterativ zu lösen. Hierzu wird die Gleichung so umgestellt, dass sich das Problem in eine Nullstellensuche konvertiert. Die oben behandelte Rohrreibungsgleichung ergibt dann: $$f(\lambda) = \frac{1}{\sqrt\lambda} - 2\cdot\log_{10}\left(\text{Re}\cdot\sqrt\lambda\right) + 0,8 = 0$$ Ausgehend von einem geschätzten Startwert für die Nullstelle $\lambda_i$ wird die Steigung $f'(\lambda_i)$ der Funktion berechnet. Die Tangente im Punkt $(\lambda_i,f(\lambda_i))$ ist dann: $$t(\lambda) = f(\lambda_i) + f'(\lambda_i)\cdot (\lambda - \lambda_i)$$ Der Schnittpunkt dieser Tangente mit der $\lambda$-Achse ergibt den neuen Näherungswert für die Nullstelle und damit die Iterationsvorschrift: $$\lambda_{i+1} = \lambda_i - \frac{f(\lambda_i)}{f'(\lambda_i)}$$ Im Beispiel mit der Rohrreibungsgleichung ist die Ableitung: $$f'(\lambda) = -\frac{1}{2\cdot\lambda^{3/2}} - \frac{1}{\lambda\cdot\ln 10}$$
# Startwert für lambda: lamb_alt = 0.01 # Liste, um Zwischenergebnisse zu speichern lambda_newton_i = [] lambda_newton_i.append(lamb_alt) # die Funktion f def f(lamb, Re): return 1/np.sqrt(lamb) - 2.0 * np.log10(Re * np.sqrt(lamb)) + 0.8 # die Ableitung von f def f_strich(lamb): return -1/(2*lamb**1.5) - 1/(lamb*math.log(10)) # die Tangente an f (nur zur Visualisierung, wird eigentlich nicht benötigt) def tangente_f(x, lamb, Re): return f(lamb,Re)+f_strich(lamb)*(x-lamb) # Newton-Verfahren: for iteration in range(0, 15): lamb_neu = lamb_alt - f(lamb_alt, Re)/f_strich(lamb_alt) lambda_newton_i.append(lamb_neu) lamb_alt = lamb_neu fehler = (RHS(lamb_neu, Re)-LHS(lamb_neu)) / RHS(lamb_neu, Re) # Ergebnisse im Diagramm darstellen: plt.plot(lamb, f(lamb, Re)) plt.plot(lambda_newton_i, f(lambda_newton_i, Re), 'o') for i, txt in enumerate(lambda_newton_i): plt.annotate(i, (lambda_newton_i[i], f(lambda_newton_i[i], Re))) plt.plot(lamb, tangente_f(lamb, lambda_newton_i[i], Re)) plt.plot([0,0.02],[0,0],'k', linewidth=1) plt.axis([0, 0.02, -4, 6]) plt.ylabel('f = LHS - RHS') plt.xlabel('Rohrreibungszahl $\lambda$') plt.show(); print ("lambda = ", lamb_neu, ", Fehler in %: ", fehler*100)
3_1-Numerik_Iterative_Verfahren.ipynb
HsKA-ThermalFluiddynamics/NSS-1
mit
Verfahren zur Lösung von Gleichungssystemen Weitere Verfahren, zur Lösung von ganzen Gleichungssystemen werden in Kapitel 4 vorgestellt. Diese Verfahren werden z.B. verwendet, um die riesigen Gleichungssysteme zu lösen, die bei der Diskretisierung von Transportgleichungen mithilfe der Finite-Differenzen- (FDM), Finite-Elemente- (FEM) oder Finite-Volumen-Methode (FVM) entstehen. Prominente Vertreter sind das Gauß-Verfahren (Gauß-Seidel-Verfahren) und der Thomas-Algorithmus. Hier geht's weiter oder hier zurück zur Übersicht. Copyright (c) 2018, Florian Theobald und Matthias Stripf Der folgende Python-Code darf ignoriert werden. Er dient nur dazu, die richtige Formatvorlage für die Jupyter-Notebooks zu laden.
from IPython.core.display import HTML def css_styling(): styles = open('TFDStyle.css', 'r').read() return HTML(styles) css_styling()
3_1-Numerik_Iterative_Verfahren.ipynb
HsKA-ThermalFluiddynamics/NSS-1
mit
Fitting a model to data with outliers using MCMC We are often faced with data with spurious outliers. For example, a light curve generated from some automated photometric pipeline with failures in the background subtraction on some nights. It is tempting to remove these outliers by eye, or by some automatic procedure (e.g., sigma clipping), however these points may actually contain valuable information. Your model should be able to account for these data! Luckily, it is relatively easy to account for this within a statistical model: the data are modeled as being either drawn from your physical model, or an outlier model with an extra parameter to specify the global weights of these two distributions. Below we consider a trivial but illustrative example. This problem is described in a little more detail in Section 5.6.7 of Statistics, Data Mining, and Machine Learning in Astronomy. Another good reference is David Hogg's Fitting a model to data paper. Example data We take images of a variable star at random times over the course of a year. We have some automated pipeline that reduces the data, and spits out magnitudes for each observation. Some nights, the pipeline fails, but the way in which we estimate uncertainties on the measured magnitudes doesn't account for these failures, leading to outliers in the final data product (a light curve). For now, we'll assume the shape of the light curve is a perfect sinusoid, but in principle any template could be used. Our goal will be to measure the amplitude, period, and phase of the sinusoid given noisy observations of the star. We start by definining some helper functions to generate the fake data:
def sinusoid(t, amp, period, phase): """ A generic sinusoidal curve. 'period' and 't' should have the same units (e.g., days), and phase should be in radians. Parameters ---------- t : array_like Array of times. amp : numeric Amplitude of the sinusoid. period : numeric Period of the sinusoid. phase : numeric Phase of the sinusoid. """ return amp*np.sin(2*np.pi*t/period + phase) def light_curve_model(p, t): """ Our model for the variable star light curve will be a pure sinusoid plus some constant offset (the mean magnitude of the star). The function takes a single array of parameters, p, and an array of times, t. By structuring the function input parameters this way, we can use this function to both generate and later fit the data. Parameters ---------- p : iterable A list, tuple, or array of model parameter values. For example, a tuple of (amplitude, period, phase, mean mag.). t : array_like Array of times. """ amp, period, phase, const = p return sinusoid(t, amp, period, phase) + const
Fitting a model with outliers using MCMC.ipynb
adrn/stats-tutorials
mit
Now we'll actually generate the fake data. The cell below contains tunable parameters -- if you want to change the number of data points, the fraction of points that are outliers, or the true model parameters, modify the variables in this cell (you'll have to download the full version of this notebook):
ndata_points = 32 # number of data points outlier_fraction = 0.1 # 10% of the points will be outliers true_amplitude = 1.5 # mag true_period = 112. # days true_phase = 1.5 # radians true_mean_mag = 14. # mag
Fitting a model with outliers using MCMC.ipynb
adrn/stats-tutorials
mit
# pack the true parameters into a single tuple true_params = (true_amplitude, true_period, true_phase, true_mean_mag) # generate an array of observation times time = np.random.uniform(0., 365., size=ndata_points) time.sort() # generate magnitude values from the model at the observation times mag = light_curve_model(true_params, time) # each data point will have a different uncertainty, sampled from # a uniform distribution between 0.2 and 0.4 magnitudes mag_err = np.random.uniform(0.2, 0.4, size=ndata_points) # pick outlier points based on the set outlier_fraction. we generate a # boolean array (array of True's and False's) -- when a given index # is True, that point will become an outlier outlier_idx = np.random.uniform(size=ndata_points) < outlier_fraction # for the outlier points, add large scatter mag[outlier_idx] += np.random.normal(0., 5., size=sum(outlier_idx)) # for the non-outlier points, add scatter based on the uncertainty array (mag_err). # the twiddle (~) means 'logical not' - (True becomes False, False becomes True) mag[~outlier_idx] += np.random.normal(0., mag_err[~outlier_idx]) plt.figure(figsize=(12,4)) plt.errorbar(time, mag, mag_err, marker='o', linestyle='none', ecolor='#aaaaaa') plt.xlim(0,365) plt.xlabel("Time [day]") plt.ylabel("Magnitude")
Fitting a model with outliers using MCMC.ipynb
adrn/stats-tutorials
mit
Now we're armed with some fake data in the form of 3 arrays: the observation times, magnitudes, and uncertainties for the magnitudes (we'll assume we can measure the time of observation to arbitrary precision). We'll start by trying to fit our light curve model without taking into account the outliers. We're going to use the same model that we used to generate the data, but ignore the fact that there are outlier points. If we assume that each data point is independent and has Gaussian uncertainties (as we have done by construction), we can write down a likelihood for each $i$ data point as a Gaussian: $$ p(m_i \,\vert\, A,T,\phi,m_0,\sigma_{m,i},t_i) = \frac{1}{\sqrt{2\pi\sigma_{m,i}^2}} \exp \left(-\frac{(m_i - f(t_i, A, T, \phi, m_0))^2}{2\sigma_{m,i}^2}\right) $$ where our light curve model is expressed as the function $f$: $$ f(t_i, A, T, \phi, m_0) = A\sin(\frac{2\pi t_i}{T}+\phi) - m_0 $$ This looks just like a $\chi^2$ function because we are assuming our data are drawn from a Gaussian around some true model with the parameters amplitude, $A$, period, $T$, phase, $\phi$, and mean magnitude, $m_0$. The full likelihood (over all data points) is then the product of these of the individual probabilities: $$ \mathcal{L} = \prod_i^N p(m_i \,\vert\, A,T,\phi,m_0,\sigma_{m,i},t_i) = p({m_i}N \,\vert\, A,T,\phi,m_0,{\sigma{m,i}}_N,{t_i}_N) $$ where the notation ${m_i}_N$ means the set of all $N$ data points. For this example, we will assume uniform priors over all parameters, $p(A)\,p(T)\,p(\phi)\,p(m_0)$. It is a bad idea to define uniform priors over angular parameters (like the phase, $\phi$, in this example), but I've done it anyway for simplicity. In this case, we could just maximize this likelihood to find the "best fit" parameters, but instead we will use MCMC to sample from the posterior probability over the model parameters. 
The full posterior probability for our model is then given by: $$ p(A,T,\phi,m_0 \,\vert\, \{m_i\}_N, \{\sigma_{m,i}\}_N, \{t_i\}_N) = \frac{1}{\mathcal{Z}} p(\{m_i\}_N \,\vert\, A,T,\phi,m_0,\{\sigma_{m,i}\}_N,\{t_i\}_N)\,p(A)\,p(T)\,p(\phi)\,p(m_0) $$ where the constant, $\mathcal{Z}$, only depends on the properties of the data -- which we are not varying -- and thus we can ignore it for optimization. In practice, evaluating the logarithm of the posterior is often more stable to numerical issues (e.g., overflow or underflow), so below we define functions to evaluate the log of all of the above probabilities. This, combined with using uniform priors, means that the log-priors evaluate to $-\infty$ when a value is outside of the specified range and are constant (zero) within the defined range.
def lnprior(p): amp,period,phase,const = p if amp < 1 or amp > 2: return -np.inf if period < 10 or period > 200: return -np.inf if phase < 0. or phase > 2*np.pi: return -np.inf if const < 12 or const > 16: return -np.inf return 0. def lnlikelihood(p, t, data, err): amp,period,phase,const = p return -np.log(err) - 0.5*( (data - sinusoid(t, amp, period, phase) - const) / err )**2 def lnprob(p, t, data, err): return lnprior(p) + np.sum(lnlikelihood(p, t, data, err)) ndim, nwalkers = 4, 32 p0 = np.zeros((nwalkers,ndim)) p0[:,0] = np.random.uniform(1, 2., size=nwalkers) # amp p0[:,1] = np.random.uniform(10, 200., size=nwalkers) # period (days) p0[:,2] = np.random.uniform(0., 2*np.pi, size=nwalkers) # phase (radians) p0[:,3] = np.random.uniform(12., 16., size=nwalkers) # const. offset (mag) sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(time,mag,mag_err)) pos, prob, state = sampler.run_mcmc(p0, 1000) best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()] pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers) sampler.reset() pos, prob, state = sampler.run_mcmc(pos, 100) sampler.reset() pos, prob, state = sampler.run_mcmc(pos, 1000) param_labels = ["Amp.", "Period [day]", "Phase [rad]", "Mean mag."] extents = [(0.5*truth,1.5*truth) for truth in true_params] fig = triangle.corner(sampler.flatchain, labels=param_labels, truths=true_params, range=extents)
Fitting a model with outliers using MCMC.ipynb
adrn/stats-tutorials
mit
Interesting -- we almost nailed the mean magnitude, but period, phase, and amplitude are biased (by many percent). Let's draw some samples from this posterior and plot the models over the data:
nsamples = sampler.flatchain.shape[0] plt.figure(figsize=(12,4)) plt.errorbar(time, mag, mag_err, marker='o', linestyle='none', ecolor='#aaaaaa') t = np.linspace(min(time), max(time), 1000) for ii in range(10): idx = np.random.randint(0, nsamples) params = sampler.flatchain[idx] model_mag = light_curve_model(params, t) plt.plot(t, model_mag, marker=None, color='#f03b20', alpha=0.4) plt.xlim(min(time), max(time))
Fitting a model with outliers using MCMC.ipynb
adrn/stats-tutorials
mit
What can we do about this? We can define a mixture model! We add a new parameter to the model: the fraction of stars that are instead drawn from some very broad outlier distribution. There are many possible options for what to put in for this outlier distribution, but below we will assume that the outlier distribution is still centered on the model, but with a huge variance. Our "inlier" distribution will be the same as above: $$ p_{\rm in} = p(m_i \,\vert\, A,T,\phi,m_0,\sigma_{m,i},t_i) = \frac{1}{\sqrt{2\pi\sigma_{m,i}^2}} \exp \left(-\frac{(m_i - f(t_i, A, T, \phi, m_0))^2}{2\sigma_{m,i}^2}\right) $$ and our outlier distribution: $$ p_{\rm out} = p(m_i \,\vert\, A,T,\phi,m_0,V,t_i) = \frac{1}{\sqrt{2\pi V}} \exp \left(-\frac{(m_i - f(t_i, A, T, \phi, m_0))^2}{2 V}\right) $$ where $\sqrt{V}$ is arbitrarily fixed to be 10 times the median uncertainty of the data. The new likelihood for a single point is then: $$ \mathcal{L}_i = (1-f_{\rm out})\,p_{\rm in} + f_{\rm out}\,p_{\rm out} $$ $f_{\rm out}$ represents the fraction of the data points that are outliers, and this will be a new parameter in the model.
# Refit with a mixture model: each point is drawn from the light-curve
# model (weight 1 - fout) or a broad outlier distribution (weight fout).

def lnprior(p):
    """Log of the (uniform) prior: 0 inside the allowed box, -inf outside.

    NOTE(review): the period prior here is (100, 200), much tighter than the
    (10, 200) used in the no-outlier fit above — confirm this is deliberate.
    """
    amp = p[0]
    period = p[1]
    phase = p[2]
    const = p[3]
    fout = p[4]

    if amp < 1 or amp > 2:
        return -np.inf

    if period < 100 or period > 200:
        return -np.inf

    if phase < 0. or phase > 2*np.pi:
        return -np.inf

    if const < 12 or const > 16:
        return -np.inf

    if fout > 1. or fout < 0.:
        return -np.inf

    return 0.

def ln_model_likelihood(p, t, data, err):
    """Per-point Gaussian log-likelihood under the light-curve model."""
    amp, period, phase, const, outlier_prob = p
    term = -np.log(err) - 0.5*( (data - sinusoid(t, amp, period, phase) - const) / err )**2
    return term

def ln_outlier_likelihood(p, t, data, err):
    """Per-point log-likelihood under the outlier model: same mean, but a
    fixed, much larger width (10x the median uncertainty)."""
    amp, period, phase, const, outlier_prob = p
    outlier_err = 10.*np.median(err)
    term = -np.log(outlier_err) - 0.5*( (data - sinusoid(t, amp, period, phase) - const) / outlier_err )**2
    return term

def lnlikelihood(p, t, data, err):
    """Per-point log of the mixture (1-fout)*p_in + fout*p_out, computed
    stably with logsumexp and per-component weights b."""
    amp, period, phase, const, fout = p

    term1 = ln_model_likelihood(p, t, data, err)
    term2 = ln_outlier_likelihood(p, t, data, err)

    b = np.ones((2,len(t)))
    b[0] = 1. - fout
    b[1] = fout

    return logsumexp(np.vstack((term1, term2)), b=b, axis=0)

def lnprob(p, t, data, err):
    """Log-posterior; short-circuits when the prior is -inf so the
    likelihood is never evaluated for invalid parameters."""
    prior = lnprior(p)
    if np.isinf(prior):
        return -np.inf
    return prior + np.sum(lnlikelihood(p, t, data, err))

ndim, nwalkers = 5, 64

# Initialize the walkers; fout starts in a small ball around 0.5.
p0 = np.zeros((nwalkers,ndim))
p0[:,0] = np.random.uniform(1, 2., size=nwalkers) # amp
p0[:,1] = np.random.uniform(100, 200., size=nwalkers) # period (days)
p0[:,2] = np.random.uniform(0., 2*np.pi, size=nwalkers) # phase (radians)
p0[:,3] = np.random.uniform(12., 16., size=nwalkers) # const. offset (mag)
p0[:,4] = np.random.normal(0.5, 0.05, size=(nwalkers)) # outlier probability

sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(time,mag,mag_err))
pos, prob, state = sampler.run_mcmc(p0, 1000)

# Burn-in: restart the walkers near the best sample, then run production.
best_pos = sampler.flatlnprobability.argmax()
new_pos = emcee.utils.sample_ball(sampler.flatchain[best_pos], sampler.flatchain[best_pos]/100, size=nwalkers)
sampler.reset()
pos, prob, state = sampler.run_mcmc(new_pos, 1000)

# Corner plot, with 0.1 (the true outlier fraction) marked for fout.
extents = [(0.5*truth,1.5*truth) for truth in true_params] + [(0,1)]
fig = triangle.corner(sampler.flatchain[:,:], labels=param_labels + [""], truths=list(true_params) + [0.1], range=extents, plot_datapoints=False)

# Overplot 10 posterior draws (first 4 parameters only) on the data.
nsamples = sampler.flatchain.shape[0]

plt.figure(figsize=(12,4))
plt.errorbar(time, mag, mag_err, marker='o', linestyle='none', ecolor='#aaaaaa')

t = np.linspace(min(time), max(time), 1000)
for ii in range(10):
    idx = np.random.randint(0, nsamples)
    params = sampler.flatchain[idx,:4]
    model_mag = light_curve_model(params, t)
    plt.plot(t, model_mag, marker=None, color='#f03b20', alpha=0.4)

plt.xlim(min(time), max(time))
Fitting a model with outliers using MCMC.ipynb
adrn/stats-tutorials
mit
Let see what is inside by printing out the first few lines.
print " ".join([field.ljust(10) for field in house_data.keys()]) for i in xrange(10): print " ".join([str(house_data[field][i]).ljust(10) for field in house_data.keys()]) TOTALS = len(house_data['House']) print "...\n\nTotal number of samples: {}".format(TOTALS)
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
For univariate regression, we are interested in the "size" parameter only. Let's extract necessary data and visualise it.
X, Z = ml_utils.extract_data(house_data, ['size'], ['price']) Z = Z/1000.0 #price has unit x1000 USD plt.plot(X[0],Z[0], '.') plt.xlabel('size (feet^2)') plt.ylabel('price (USD x1000)') plt.title('house data scatter plot') plt.show()
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
Our goal is to build a house price prediction model that will approximate the price of a house given its size. To do it, we need to fit a linear line (y = ax + b) to the data above using linear regression. Remember the procedure: 1. Define training set 2. Define hypothesis function. Here $F(x,W) = Wx$ 3. Loss function. Here $L(W) = \frac{1}{2N}{\sum_{i=1}^N{(F(x^{(i)},W)-z)^2}}$ 4. Update procedure (gradient descent). $W = W - k\frac{\partial L}{\partial W}$ To speed up computation, you should avoid using loop when working with scripting languges e.g. Python, Matlab. Try using array/matrix instead. Here you are provided code for step 1 and 2. Your will be asked to implement step 3 and 4. Some skeleton code will be provided for your convenience.
"""step 1: define training and test set X, Z.""" X_train, Z_train = ml_utils.extract_data(house_data_train, ['size'], ['price']) X_test, Z_test = ml_utils.extract_data(house_data_test, ['size'], ['price']) Z_train = Z_train/1000.0 #price has unit x1000 USD Z_test = Z_test/1000.0 ##normalise data, uncomment for now #X_train, u, scale = ml_utils.normalise_data(X_train) #X_test = ml_utils.normalise_data(X_test, u, scale) N = Z_train.size #number of training samples ones_array = np.ones((1,N),dtype=np.float32) X_train = np.concatenate((X_train, ones_array), axis=0) #why? X_test = np.concatenate((X_test, np.ones((1, Z_test.size), dtype=np.float32)), axis = 0) #same for test data print "size of X_train ", X_train.shape print "size of Z_train ", Z_train.shape """step 2: define hypothesis function""" def F_Regression(X, W): """ Compute the hypothesis function y=F(x,W) in batch. input: X input array, must has size DxN (each column is one sample) W parameter array, must has size 1xD output: linear multiplication of W*X, size 1xN """ return np.dot(W,X)
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
Task 1.1: define the loss function for linear regression according to the following formula: $$L = \frac{1}{2N}{\sum_{i=1}^N{(y^{(i)}-z^{(i)})^2}}$$ Please fill in the skeleton code below. Hints: (i) in Python numpy the square operator $x^2$ is implemented as x**2; (ii) try to use matrix form and avoid for loop
"""step 3: loss function""" def Loss_Regression(Y, Z): """ Compute the loss between the predicted (Y=F(X,W)) and the groundtruth (Z) values. input: Y predicted results Y = F(X,W) with given parameter W, has size 1xN Z groundtruth vector Z, has size 1xN output: loss value, is a scalar """ #enter the code here N = float(Z.size) diff = Y-Z return 1/(2*N)*np.dot(diff, diff.T).squeeze()
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
Task 1.2: compute gradient of the loss function w.r.t parameter W according to the following formula:<br> $$\frac{\partial L}{\partial W} = \frac{1}{N}\sum_{i=1}^N{(y^{(i)}-z^{(i)})x^{(i)}}$$ Please fill in the skeleton code below.
"""step 4: gradient descent - compute gradient""" def dLdW_Regression(X, Y, Z): """ Compute gradient of the loss w.r.t parameter W. input: X input array, each column is one sample, has size DxN Y predicted values, has size 1xN Z groundtruth values, has size 1xN output: gradient, has same size as W """ #enter the code here N = float(Z.size) return 1/N * (Y-Z).dot(X.T)
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
Now we will perform gradient descent update procedure according to the following formula: $$W = W - k\frac{\partial L}{\partial W}$$ Here we use fixed number of iterations and learning rate.
"""step 4: gradient descent - update loop""" np.random.seed(0) W = np.random.rand(1,X_train.shape[0]).astype(np.float32) #W has size 1xD, randomly initialised k = 1e-8 #learning rate niters = 160 #number of training iterations #visualisation settings vis_interval = niters/50 loss_collections = [] plt.close() plt.ion() fig = plt.figure(1,figsize=(16, 4)) axis_loss = fig.add_subplot(131) axis_data = fig.add_subplot(132) for i in xrange(niters): Y_train = F_Regression(X_train,W) #compute hypothesis function aka. predicted values loss = Loss_Regression(Y_train, Z_train) #compute loss dLdW = dLdW_Regression(X_train, Y_train, Z_train) #compute gradient W = W - k*dLdW #update loss_collections.append(loss) if (i+1)% vis_interval == 0: ml_utils.plot_loss(axis_loss, range(i+1),loss_collections, "loss = " + str(loss)) ml_utils.plot_scatter_and_line(axis_data, X_train, Z_train, W, "iter #" + str(i)) fig.canvas.draw() print "Learned parameters ", W.squeeze()
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
Quiz: you may notice the learning rate k is set to $10^{-8}$. Why is it too small? Try to play with several bigger values of k, you will soon find out that the training is extremely sensitive to the learning rate (the training easily diverges or even causes "overflow" error with large k).<br><br> Answer: It is because both the input (size of house) and output (price) have very large range of values, which result in very large gradient. Task 1.3: Test your learned model. Suppose you want to sell a house of size 3000 $feat^2$, how much do you expect your house will cost?<br> Answer: you should get around 260k USD for that house.
x = 3000 x = np.array([x,1])[...,None] #make sure feature vector has size 2xN, here N=1 print "Expected price: ", F_Regression(x,W).squeeze()
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
Task 1.4: The gradient descent in the code above terminates after 100 iterations. You may want it to terminate when improvement in the loss is below a threshold. $$\Delta L_t = |L_t - L_{t-1}| < \epsilon$$ Edit the code to terminate the loop when the loss improvement is below $\epsilon=10^{-2}$. Re-evaluate your model to see if its performance has improved.
"""step 4: gradient descent - update loop""" W = np.random.rand(1,X_train.shape[0]).astype(np.float32) #W has size 1xD, randomly initialised k = 1e-8 #learning rate epsilon = 1e-2 #terminate condition #visualisation settings vis_interval = 10 loss_collections = [] prev_loss = 0 plt.close() plt.ion() fig = plt.figure(1,figsize=(16, 4)) axis_loss = fig.add_subplot(131) axis_data = fig.add_subplot(132) while(1): Y_train = F_Regression(X_train,W) #compute hypothesis function aka. predicted values loss = Loss_Regression(Y_train, Z_train) #compute loss dLdW = dLdW_Regression(X_train, Y_train, Z_train) #compute gradient W = W - k*dLdW #update loss_collections.append(loss) if abs(loss - prev_loss) < epsilon: break prev_loss = loss if (len(loss_collections)+1) % vis_interval==0: #print "Iter #", len(loss_collections) ml_utils.plot_loss(axis_loss, range(len(loss_collections)),loss_collections, "loss = " + str(loss)) ml_utils.plot_scatter_and_line(axis_data, X_train, Z_train, W, "iter #" + str(len(loss_collections))) fig.canvas.draw() print "Learned parameters ", W.squeeze() print "Learning terminates after {} iterations".format(len(loss_collections)) #run the test Y_test = F_Regression(X_test, W) error = Loss_Regression(Y_test, Z_test) print "Evaluation error: ", error
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
Confirm that the error measurement on the test set has improved. 1.2 Multivariate regression So far we assume the house price is affected by the size only. Now let consider also other fields "Bedrooms", "Baths", "lot" (location) and "NW" (whether or not the houses face Nothern West direction).<br><br> Important: now your feature vector is multi-dimensional, it is crucial to normalise your training set for gradient descent to converge properly. The code below is almost identical to the previous step 1, except it loads more fields and implements data normalisation.
"""step 1: define training set X, Z.""" selected_fields = ['size', 'Bedrooms', 'Baths', 'lot', 'NW'] X_train, Z_train = ml_utils.extract_data(house_data_train, selected_fields, ['price']) X_test, Z_test = ml_utils.extract_data(house_data_test, selected_fields, ['price']) Z_train = Z_train/1000.0 #price has unit x1000 USD Z_test = Z_test/1000.0 ##normalise X_train, u, scale = ml_utils.normalise_data(X_train) X_test = ml_utils.normalise_data(X_test, u, scale) N = Z_train.size #number of training samples ones_array = np.ones((1,N),dtype=np.float32) X_train = np.concatenate((X_train, ones_array), axis=0) #why? X_test = np.concatenate((X_test, np.ones((1, Z_test.size), dtype=np.float32)), axis = 0) #same for test data print "size of X_train ", X_train.shape print "size of Z_train ", Z_train.shape
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
Now run step 2-4 again. Note the followings: 1. You need not to modify the Loss_Regression and dLdW_Regression functions. They should generalise enough to work with multi-dimensional data 2. Since your training samples are normalised you can now use much higher learning rate e.g. k = 1e-2 3. Note that the plot function plot_scatter_and_line will not work in multivariate regression since it is designed for 1-D input only. Consider commenting it out.<br> Question: how many iterations are required to pass the threshold $\Delta L < 10^{-2}$ ?<br> Answer: ~4000 iterations (and it will take a while to complete). Task 1.5: (a) evaluate your learned model on the test set. (b) Suppose the house you want to sell has a size of 3000 $feet^2$, has 3 bedrooms, 2 baths, lot number 10000 and in NW direction. How much do you think its price would be? Hints: don't forget to normalise the test sample.<br> Answer: You will get ~150k USD only, much lower than the previous prediction based on size only. Your house has an advantage of size, but other parameters matter too.
"""step 4: gradient descent - update loop""" """ same code but change k = 1e-2""" W = np.random.rand(1,X_train.shape[0]).astype(np.float32) #W has size 1xD, randomly initialised k = 1e-2 #learning rate epsilon = 1e-2 #terminate condition #visualisation settings vis_interval = 10 loss_collections = [] prev_loss = 0 plt.close() plt.ion() fig = plt.figure(1,figsize=(16, 4)) axis_loss = fig.add_subplot(131) #axis_data = fig.add_subplot(132) while(1): Y_train = F_Regression(X_train,W) #compute hypothesis function aka. predicted values loss = Loss_Regression(Y_train, Z_train) #compute loss dLdW = dLdW_Regression(X_train, Y_train, Z_train) #compute gradient W = W - k*dLdW #update loss_collections.append(loss) if abs(loss - prev_loss) < epsilon: break prev_loss = loss if (len(loss_collections)+1) % vis_interval==0: #print "Iter #", len(loss_collections) ml_utils.plot_loss(axis_loss, range(len(loss_collections)),loss_collections, "loss = " + str(loss)) #ml_utils.plot_scatter_and_line(axis_data, X_train, Z_train, W, "iter #" + str(len(loss_collections))) fig.canvas.draw() print "Learned parameters ", W.squeeze() print "Learning terminates after {} iterations".format(len(loss_collections)) """apply on the test set""" Y_test = F_Regression(X_test, W) error = Loss_Regression(Y_test, Z_test) print "Evaluation error: ", error """test a single sample""" x = np.array([3000, 3,2, 10000, 1],dtype=np.float32)[...,None] x = ml_utils.normalise_data(x, u, scale) x = np.concatenate((x,np.ones((1,1))),axis=0) print "Price: ", F_Regression(x,W).squeeze()
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
1.3 Gradient descent with momentum In the latest experiment, our training takes ~4000 iterations to converge. Now let try gradient descent with momentum to speed up the training. We will employ the following formula: $$v_t = m*v_{t-1} + k\frac{\partial L}{\partial W}$$ $$W = W - v_t$$
"""step 4: gradient descent with momentum - update loop""" W = np.random.rand(1,X_train.shape[0]).astype(np.float32) #W has size 1xD, randomly initialised k = 1e-2 #learning rate epsilon = 1e-2 #terminate condition m = 0.9 #momentum v = 0 #initial velocity #visualisation settings vis_interval = 10 loss_collections = [] prev_loss = 0 plt.close() plt.ion() fig = plt.figure(1,figsize=(16, 4)) axis_loss = fig.add_subplot(131) #axis_data = fig.add_subplot(132) while(1): Y_train = F_Regression(X_train,W) #compute hypothesis function aka. predicted values loss = Loss_Regression(Y_train, Z_train) #compute loss dLdW = dLdW_Regression(X_train, Y_train, Z_train) #compute gradient v = v*m + k*dLdW W = W - v #update loss_collections.append(loss) if abs(loss - prev_loss) < epsilon: break prev_loss = loss if (len(loss_collections)+1) % vis_interval==0: #print "Iter #", len(loss_collections) ml_utils.plot_loss(axis_loss, range(len(loss_collections)),loss_collections, "loss = " + str(loss)) #ml_utils.plot_scatter_and_line(axis_data, X_train, Z_train, W, "iter #" + str(len(loss_collections))) fig.canvas.draw() print "Learned parameters ", W.squeeze() print "Learning terminates after {} iterations".format(len(loss_collections))
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
Task 2.1: your first task is to define the hypothesis function. Do you remember the hypothesis function in a binary classification task? It has form of a sigmoid function: $$F(x,W) = \frac{1}{1+e^{-Wx}}$$
def F_Classification(X, W):
    """ Compute the hypothesis function given input array X and parameter W
    input:
        X    input array, must has size DxN (each column is one sample)
        W    parameter array, must has size 1xD
    output: sigmoid of W*X, size 1xN
    """
    # linear scores, then the logistic sigmoid 1/(1+e^-s)
    scores = W.dot(X)
    return 1.0 / (1.0 + np.exp(-scores))
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
Task 2.2: define the loss function for binary classification. It is called "negative log loss": $$L(W) = -\frac{1}{N} \sum_{i=1}^N{\left[z^{(i)} \log\left(F(x^{(i)},W)\right) + \left(1-z^{(i)}\right) \log\left(1-F(x^{(i)},W)\right)\right]}$$ Next, define the gradient function: $$\frac{\partial L}{\partial W} = \frac{1}{N}(F(X,W) - Z)X^T$$
"""step 3: loss function for classification""" def Loss_Classification(Y, Z): """ Compute the loss between the predicted (Y=F(X,W)) and the groundtruth (Z) values. input: Y predicted results Y = F(X,W) with given parameter W, has size 1xN Z groundtruth vector Z, has size 1xN output: loss value, is a scalar """ #enter the code here N = float(Z.size) return -1/N*(np.dot(np.log(Y), Z.T) + np.dot(np.log(1-Y), (1-Z).T)).squeeze() """step 4: gradient descent for classification - compute gradient""" def dLdW_Classification(X, Y, Z): """ Compute gradient of the loss w.r.t parameter W. input: X input array, each column is one sample, has size DxN Y probability of label = 1, has size 1xN Z groundtruth values, has size 1xN output: gradient, has same size as W """ #enter the code here N = float(Z.size) return 1/N * (Y-Z).dot(X.T) W = np.random.rand(1,X_train.shape[0]).astype(np.float32) #W has size 1xD, randomly initialised k = 0.2 #learning rate epsilon = 1e-6 #terminate condition m = 0.9 #momentum v = 0 #initial velocity #visualisation settings vis_interval = 10 loss_collections = [] prev_loss = 0 plt.close() plt.ion() fig = plt.figure(1,figsize=(16, 4)) axis_loss = fig.add_subplot(131) axis_data = fig.add_subplot(132) while(1): Y_train = F_Classification(X_train,W) #compute hypothesis function aka. 
predicted values loss = Loss_Classification(Y_train, Z_train) #compute loss dLdW = dLdW_Classification(X_train, Y_train, Z_train) #compute gradient v = v*m + k*dLdW W = W - v #update loss_collections.append(loss) if abs(loss - prev_loss) < epsilon: break prev_loss = loss if (len(loss_collections)+1) % vis_interval==0: ml_utils.plot_loss(axis_loss, range(len(loss_collections)),loss_collections, "loss = " + str(loss)) ml_utils.plot_scatter_with_label_2d(axis_data, X_train, Z_train, W, "student score scatter plot") fig.canvas.draw() print "Learned parameters ", W.squeeze() print "Learning terminates after {} iterations".format(len(loss_collections)) #evaluate Y_test = F_Classification(X_test, W) predictions = Y_test > 0.5 accuracy = np.sum(predictions == Z_test)/float(Z_test.size) print "Test accuracy: ", accuracy
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
We achieve 90% accuracy (only two students have been misclassified). Not too bad, isn't it? Task 2.3: regularisation Now we want to add a regularisation term into the loss to prevent overfitting. Regularisation loss is simply magnitude of the parameter vector W after removing the last element (i.e. bias doesn't count to regularisation). $$L_R = \frac{1}{2}|W'|^2$$ where W' is W with the last element truncated.<br> Now the total loss would be: $$L(W) = -\frac{1}{N} \sum_{i=1}^N{[z^{(i)} log(F(x^{(i)},W)) + (1-z^{(i)})(log(1-F(x^{(i)},W))]} + \frac{1}{2}|W'|^2$$ The gradient become: $$\frac{\partial L}{\partial W} = \frac{1}{N}(F(X,W) - Z)X^T + W''$$ where W'' is W with the last element change to 0. Your task is to implement the loss and gradient function with added regularisation.
"""step 3: loss function with regularisation""" def Loss_Classification_Reg(Y, Z, W): """ Compute the loss between the predicted (Y=F(X,W)) and the groundtruth (Z) values. input: Y predicted results Y = F(X,W) with given parameter W, has size 1xN Z groundtruth vector Z, has size 1xN W parameter vector, size 1xD output: loss value, is a scalar """ #enter the code here N = float(Z.size) W_ = W[:,:-1] return -1/N*(np.dot(np.log(Y), Z.T) + np.dot(np.log(1-Y), (1-Z).T)).squeeze() + 0.5*np.dot(W_,W_.T).squeeze() """step 4: gradient descent with regularisation - compute gradient""" def dLdW_Classification_Reg(X, Y, Z, W): """ Compute gradient of the loss w.r.t parameter W. input: X input array, each column is one sample, has size DxN Y probability of label = 1, has size 1xN Z groundtruth values, has size 1xN W parameter vector, size 1xD output: gradient, has same size as W """ #enter the code here N = float(Z.size) W_ = W W_[:,-1] = 0 return 1/N * (Y-Z).dot(X.T) + W_
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
Rerun the update loop again with the new loss and gradient functions. Note you may need to change the learning rate accordingly to have proper convergence. Now you have implemented both regularisation and momentum techniques, you can use a standard learning rate value of 0.01 which is widely used in practice.
""" gradient descent with regularisation- parameter update loop""" W = np.random.rand(1,X_train.shape[0]).astype(np.float32) #W has size 1xD, randomly initialised k = 0.01 #learning rate epsilon = 1e-6 #terminate condition m = 0.9 #momentum v = 0 #initial velocity #visualisation settings vis_interval = 10 loss_collections = [] prev_loss = 0 plt.close() plt.ion() fig = plt.figure(1,figsize=(16, 4)) axis_loss = fig.add_subplot(131) axis_data = fig.add_subplot(132) for i in range(500): Y_train = F_Classification(X_train,W) #compute hypothesis function aka. predicted values loss = Loss_Classification_Reg(Y_train, Z_train, W) #compute loss dLdW = dLdW_Classification_Reg(X_train, Y_train, Z_train, W) #compute gradient v = v*m + k*dLdW W = W - v #update loss_collections.append(loss) if abs(loss - prev_loss) < epsilon: break prev_loss = loss if (len(loss_collections)+1) % vis_interval==0: ml_utils.plot_loss(axis_loss, range(len(loss_collections)),loss_collections, "loss = " + str(loss)) ml_utils.plot_scatter_with_label_2d(axis_data, X_train, Z_train, W, "student score scatter plot") fig.canvas.draw() print "Learned parameters ", W.squeeze() print "Learning terminates after {} iterations".format(len(loss_collections))
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
Question: Do you see any improvement in accuracy or convergence speed? Why? Answer: Regularisation does help speed up the training (it adds stricter rules to the update procedure). Accuracy is the same (90%) is probably because (i) number of parameters to be trained is small (2-D) and so is the number of training samples; and (ii) the data are well separated. In a learning task which involves large number of parameters (such as neural network), regularisation proves a very efficient technique. 2.2 Multi-class classification Here we are working with a very famous dataset. The Iris flower dataset has 150 samples of 3 Iris flower species (Setosa, Versicolour, and Virginica), each sample stores the height and length of its sepal and pedal in cm (4-D in total). Your task is to build a classifier to distinguish these flowers.
#read the Iris dataset iris = np.load('data/iris.npz') X = iris['X'] Z = iris['Z'] print "size X ", X.shape print "size Z ", Z.shape #split train/test with ratio 120:30 TOTALS = Z.size idx = np.random.permutation(TOTALS) idx_train = idx[:120] idx_test = idx[120:] X_train = X[:, idx_train] X_test = X[:, idx_test] Z_train = Z[:, idx_train] Z_test = Z[:, idx_test] #normalise data X_train, u, scale = ml_utils.normalise_data(X_train) X_test = ml_utils.normalise_data(X_test, u, scale) #concatenate array of "1s" to X array X_train = np.concatenate((X_train, np.ones_like(Z_train)), axis = 0) X_test = np.concatenate((X_test, np.ones_like(Z_test)), axis = 0)
Intro ML Semcomp/semcomp17_ml/.ipynb_checkpoints/semcomp17_ml_answer-checkpoint.ipynb
marcelomiky/PythonCodes
mit
In the previous chapter we simulated a penny falling in a vacuum, that is, without air resistance. But the computational framework we used is very general; it is easy to add additional forces, including drag. In this chapter, I present a model of drag force and add it to the simulation. Drag force As an object moves through a fluid, like air, the object applies force to the air and, in accordance with Newton's third law of motion, the air applies an equal and opposite force to the object (see http://modsimpy.com/newton). The direction of this drag force is opposite the direction of travel, and its magnitude is given by the drag equation (see http://modsimpy.com/drageq): $$F_d = \frac{1}{2}~\rho~v^2~C_d~A$$ where $F_d$ is force due to drag, in newtons (N). $\rho$ is the density of the fluid in kg/m^3^. $v$ is the magnitude of velocity in m/s. $A$ is the reference area of the object, in m^2^. In this context, the reference area is the projected frontal area, that is, the visible area of the object as seen from a point on its line of travel (and far away). $C_d$ is the drag coefficient, a dimensionless quantity that depends on the shape of the object (including length but not frontal area), its surface properties, and how it interacts with the fluid. For objects moving at moderate speeds through air, typical drag coefficients are between 0.1 and 1.0, with blunt objects at the high end of the range and streamlined objects at the low end (see http://modsimpy.com/dragco). For simple geometric objects we can sometimes guess the drag coefficient with reasonable accuracy; for more complex objects we usually have to take measurements and estimate $C_d$ from data. Of course, the drag equation is itself a model, based on the assumption that $C_d$ does not depend on the other terms in the equation: density, velocity, and area. 
For objects moving in air at moderate speeds (below 45 mph or 20 m/s), this model might be good enough, but we should remember to revisit this assumption. For the falling penny, we can use measurements to estimate $C_d$. In particular, we can measure terminal velocity, $v_{term}$, which is the speed where drag force equals force due to gravity: $$\frac{1}{2}~\rho~v_{term}^2~C_d~A = m g$$ where $m$ is the mass of the object and $g$ is acceleration due to gravity. Solving this equation for $C_d$ yields: $$C_d = \frac{2~m g}{\rho~v_{term}^2~A}$$ According to Mythbusters, the terminal velocity of a penny is between 35 and 65 mph (see http://modsimpy.com/mythbust). Using the low end of their range, 40 mph or about 18 m/s, the estimated value of $C_d$ is 0.44, which is close to the drag coefficient of a smooth sphere. Now we are ready to add air resistance to the model. The Params Object As the number of system parameters increases, and as we need to do more work to compute them, we will find it useful to define a Params object to contain the quantities we need to make a System object. Params objects are similar to System objects, and we initialize them the same way. Here's the Params object for the falling penny:
from modsim import Params

# Physical constants and simulation settings for the falling penny.
# mass/diameter are penny measurements; rho is air density
# (chosen for ~20 C conditions per the surrounding text); v_term is the
# low end of the Mythbusters terminal-velocity range.
params = Params(
    mass = 0.0025,      # kg
    diameter = 0.019,   # m
    rho = 1.2,          # kg/m**3
    g = 9.8,            # m/s**2
    v_init = 0,         # m / s
    v_term = 18,        # m / s
    height = 381,       # m
    t_end = 30,         # s
)
soln/chap21.ipynb
AllenDowney/ModSim
gpl-2.0
The mass and diameter are from http://modsimpy.com/penny. The density of air depends on temperature, barometric pressure (which depends on altitude), humidity, and composition (http://modsimpy.com/density). I chose a value that might be typical in Boston, Massachusetts at 20 °C. Here's a version of make_system that takes the Params object and computes the inital state, init, the area, and the coefficient of drag. Then it returns a System object with the quantities we'll need for the simulation.
from numpy import pi
from modsim import State
from modsim import System

def make_system(params):
    """Build a System object for the falling-penny simulation.

    params: Params with mass, diameter, rho, g, v_init, v_term,
            height and t_end

    returns: System with the initial state, frontal area, and the drag
             coefficient implied by the measured terminal velocity
    """
    initial_state = State(y=params.height, v=params.v_init)

    radius = params.diameter / 2
    frontal_area = pi * radius**2

    # choose C_d so that drag balances gravity at v_term
    drag_coeff = 2 * params.mass * params.g / (params.rho * frontal_area * params.v_term**2)

    return System(init=initial_state,
                  area=frontal_area,
                  C_d=drag_coeff,
                  mass=params.mass,
                  rho=params.rho,
                  g=params.g,
                  t_end=params.t_end)
soln/chap21.ipynb
AllenDowney/ModSim
gpl-2.0
Here's the plot of position as a function of time.
from modsim import decorate

def plot_position(results):
    """Plot the penny's height against time.

    results: simulation results with a `y` column (position in metres)
             indexed by time in seconds
    """
    results.y.plot()
    decorate(xlabel='Time (s)',
             ylabel='Position (m)')

plot_position(results)
soln/chap21.ipynb
AllenDowney/ModSim
gpl-2.0
Exercise: Suppose we drop a quarter from the Empire State Building and find that its flight time is 19.1 seconds. Use this measurement to estimate terminal velocity and coefficient of drag. You can get the relevant dimensions of a quarter from https://en.wikipedia.org/wiki/Quarter_(United_States_coin). Create a Params object with new values of mass and diameter. We don't know v_term, so we'll start with the initial guess 18 m/s. Use make_system to create a System object. Call run_solve_ivp to simulate the system. How does the flight time of the simulation compare to the measurement? Try a few different values of v_term and see if you can get the simulated flight time close to 19.1 seconds. Optionally, write an error function and use root_scalar to improve your estimate. Use your best estimate of v_term to compute C_d. Note: I fabricated the observed flight time, so don't take the results of this exercise too seriously.
# Solution params_quarter = params.set( mass = 0.0057, # kg diameter = 0.024, # m flight_time = 19.1, # s ) # Solution system3 = make_system(params_quarter) # Solution # Run the simulation results3, details3 = run_solve_ivp(system3, slope_func, events=event_func) details3.message # Solution # And get the flight time t_sidewalk = results3.index[-1] t_sidewalk # Solution # The flight time is a little long, # so we could increase `v_term` and try again. # Or we could write an error function def error_func(guess, params): """Final height as a function of C_d. guess: guess at v_term params: Params object returns: height in m """ print(guess) params = params.set(v_term=guess) system = make_system(params) results, details = run_solve_ivp(system, slope_func, events=event_func) t_sidewalk = results.index[-1] error = t_sidewalk - params.flight_time return error # Solution # We can test the error function like this v_guess1 = 18 error_func(v_guess1, params_quarter) # Solution v_guess2 = 22 error_func(v_guess2, params_quarter) # Solution # Now we can use `root_scalar` to find the value of # `v_term` that yields the measured flight time. from scipy.optimize import root_scalar res = root_scalar(error_func, params_quarter, bracket=[v_guess1, v_guess2]) # Solution v_term = res.root v_term # Solution # Plugging in the estimated value, # we can use `make_system` to compute `C_d` system4 = make_system(params_quarter.set(v_term=res.root)) system4.C_d
soln/chap21.ipynb
AllenDowney/ModSim
gpl-2.0
Plotting topographic maps of evoked data Load evoked data and plot topomaps for selected time points using multiple additional options.
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu> # Tal Linzen <linzen@nyu.edu> # Denis A. Engeman <denis.engemann@gmail.com> # Mikołaj Magnuski <mmagnuski@swps.edu.pl> # # License: BSD (3-clause) # sphinx_gallery_thumbnail_number = 5 import numpy as np import matplotlib.pyplot as plt from mne.datasets import sample from mne import read_evokeds print(__doc__) path = sample.data_path() fname = path + '/MEG/sample/sample_audvis-ave.fif' # load evoked corresponding to a specific condition # from the fif file and subtract baseline condition = 'Left Auditory' evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
0.18/_downloads/6d7b5624e4fa6fee90fb68aca9314f7f/plot_evoked_topomap.ipynb
mne-tools/mne-tools.github.io
bsd-3-clause
Some global data
# Candidate trading universes; pick one below by assigning to `symbols`.
SP500_Sectors = ['SPY', 'XLB', 'XLE', 'XLF', 'XLI', 'XLK', 'XLP', 'XLU', 'XLV', 'XLY']
Other_Sectors = ['RSP', 'DIA', 'IWM', 'QQQ', 'DAX', 'EEM', 'TLT', 'GLD', 'XHB']
Diversified_Assets = ['SPY', 'TLT', 'NLY', 'GLD']
Diversified_Assets_Reddit = ['IWB', 'IEV', 'EWJ', 'EPP', 'IEF', 'SHY', 'GLD']
Robot_Dual_Momentum_Equities = ['SPY', 'CWI']
Robot_Dual_Momentum_Bonds = ['CSJ', 'HYG']
Robot_Dual_Momentum_Equities_Bonds = ['SPY', 'AGG']
Robot_Wealth = ['IWM', 'SPY', 'VGK', 'IEV', 'EWJ', 'EPP', 'IEF', 'SHY', 'GLD']

# Pick one of the above
symbols = SP500_Sectors

capital = 10000
start = datetime.datetime(2007, 1, 1)
#start = datetime.datetime(*pf.SP500_BEGIN)
end = datetime.datetime.now()
#end = datetime.datetime(2019, 12, 1)

# Strategy options consumed by the backtest.
# NOTE(review): the units of `lookback` (months?) and the meaning of
# `margin` are defined by the strategy implementation -- confirm there.
options = {
    'use_adj' : True,
    'use_cache' : True,
    'lookback': 6,
    'margin': 1,
    'use_absolute_mom': False,
    'use_regime_filter': False,
    'top_tier': 2
    #'top_tier': int(len(symbols)/2)
}
options
examples/190.momentum-dmsr-portfolio/strategy.ipynb
fja05680/pinkfish
mit
View logs
# Inspect the backtest logs: head of the raw log, last trades, and the
# tail of the daily balance.
# NOTE(review): in a notebook only the LAST expression is displayed;
# the first two lines produce no visible output unless printed.
s.rlog.head()

s.tlog.tail()

s.dbal.tail()
examples/190.momentum-dmsr-portfolio/strategy.ipynb
fja05680/pinkfish
mit
Your task starts here First, let's write function that predicts class given X. Since the problem above isn't linearly separable, we add quadratic features to the classifier. This transformation is implemented in the expand function. don't forget to expand X inside classify and other functions Classifying sample should not be much harder that computing sign of dot product.
def expand(X):
    """Map 2-D points to quadratic features [x1, x2, x1^2, x2^2, x1*x2, 1].

    X: array [n_samples, 2]
    returns: array [n_samples, 6]
    """
    X_ = np.zeros((X.shape[0], 6))
    X_[:,0:2] = X
    X_[:,2:4] = X**2
    X_[:,4] = X[:,0] * X[:,1]
    X_[:,5] = 1
    return X_

def classify(X, w):
    """ Given feature matrix X [n_samples,2] and weight vector w [6],
    return an array of +1 or -1 predictions"""
    # expand to quadratic features and take the sign of the dot product;
    # a non-positive score maps to -1 so the output is strictly +1/-1
    scores = expand(X).dot(w)
    return np.where(scores > 0, 1, -1)
Seminar2/Seminar2.ipynb
ddtm/dl-course
mit
The loss you should try to minimize is the Hinge Loss. $$ L = \frac{1}{N} \sum_i \max\left(0,\; 1-y_i \, \vec w \cdot \vec x_i\right) $$
def compute_loss(X, y, w):
    """ Given feature matrix X [n_samples,2], target vector [n_samples] of +1/-1,
    and weight vector w [6], compute scalar loss function using formula above.
    """
    # hinge loss averaged over samples: mean(max(0, 1 - y_i * <w, phi(x_i)>))
    margins = y * expand(X).dot(w)
    return np.maximum(0, 1 - margins).mean()

def compute_grad(X, y, w):
    """ Given feature matrix X [n_samples,2], target vector [n_samples] of +1/-1,
    and weight vector w [6], compute vector [6] of derivatives of L over each weights.
    """
    X_ = expand(X)
    margins = y * X_.dot(w)
    # subgradient of the hinge: samples with margin >= 1 contribute zero,
    # the others contribute -y_i * phi(x_i); averaging over all N keeps
    # the 1/N factor of the loss
    active = (margins < 1).astype(float)
    return -((y * active)[:, None] * X_).mean(axis=0)
Seminar2/Seminar2.ipynb
ddtm/dl-course
mit
Implement gradient descent with momentum and test it's performance for different learning rate and momentum values.
w = np.array([1,0,0,0,0,0])
alpha = 0.0 # learning rate
mu = 0.0 # momentum

n_iter = 50
batch_size = 4
loss = np.zeros(n_iter)
# velocity accumulator for the momentum update (same shape as w)
v = np.zeros_like(w, dtype=float)
plt.figure(figsize=(12,5))
for i in range(n_iter):
    ind = random.sample(range(X.shape[0]), batch_size)
    loss[i] = compute_loss(X, y, w)
    visualize(X[ind,:], y[ind], w, loss, n_iter)

    # SGD with momentum on the mini-batch:
    #   v_t = mu * v_{t-1} + alpha * dL/dw ;  w = w - v_t
    # (alpha and mu above are 0.0 placeholders -- try e.g. alpha=0.05, mu=0.9)
    grad = compute_grad(X[ind,:], y[ind], w)
    v = mu * v + alpha * grad
    w = w - v

visualize(X, y, w, loss, n_iter)
plt.clf()
Seminar2/Seminar2.ipynb
ddtm/dl-course
mit
Implement RMSPROP algorithm
w = np.array([1,0,0,0,0,0])
alpha = 0.0 # learning rate
mean_squared_norm = 0.0 #moving average of gradient norm squared
# RMSProp hyper-parameters: decay of the moving average and a small
# constant to avoid division by zero
rho = 0.9
eps = 1e-8

n_iter = 50
batch_size = 4
loss = np.zeros(n_iter)
plt.figure(figsize=(12,5))
for i in range(n_iter):
    ind = random.sample(range(X.shape[0]), batch_size)
    loss[i] = compute_loss(X, y, w)
    visualize(X[ind,:], y[ind], w, loss, n_iter)

    # RMSProp: keep an exponential moving average of the squared gradient
    # norm and scale the step by its inverse square root
    # (alpha above is a 0.0 placeholder -- try e.g. alpha=0.1)
    grad = compute_grad(X[ind,:], y[ind], w)
    mean_squared_norm = rho * mean_squared_norm + (1 - rho) * np.sum(grad**2)
    w = w - alpha * grad / np.sqrt(mean_squared_norm + eps)

visualize(X, y, w, loss, n_iter)
plt.clf()
Seminar2/Seminar2.ipynb
ddtm/dl-course
mit
Grading We will create a grader instance below and use it to collect your answers. Note that these outputs will be stored locally inside grader and will be uploaded to the platform only after running submitting function in the last part of this assignment. If you want to make a partial submission, you can run that cell anytime you want.
# Create the grader; answers are stored locally inside it and uploaded
# to the platform only when the submitting function is run at the end.
grader = EMGrader()
python/coursera-BayesianML/02_EM_assignment.ipynb
saketkc/notebooks
bsd-2-clause
Implementing EM for GMM Indented block For debugging, we will use samples from a Gaussian mixture model with unknown mean, variance, and priors. We also added initial values of parameters for grading purposes.
# Load the debugging dataset: GMM samples plus the fixed initial
# parameters (pi0, mu0, sigma0) used for grading.
samples = np.load('samples.npz')
X = samples['data']
pi0 = samples['pi0']
mu0 = samples['mu0']
sigma0 = samples['sigma0']

plt.scatter(X[:, 0], X[:, 1], c='grey', s=30)
plt.axis('equal')
plt.show()
python/coursera-BayesianML/02_EM_assignment.ipynb
saketkc/notebooks
bsd-2-clause
Reminder Remember, that EM algorithm is a coordinate descent optimization of variational lower bound $\mathcal{L}(\theta, q) = \int q(T) \log\frac{p(X, T|\theta)}{q(T)}dT\to \max$. <b>E-step</b>:<br> $\mathcal{L}(\theta, q) \to \max\limits_{q} \Leftrightarrow \mathcal{KL} [q(T) \,\|\, p(T|X, \theta)] \to \min \limits_{q\in Q} \Rightarrow q(T) = p(T|X, \theta)$<br> <b>M-step</b>:<br> $\mathcal{L}(\theta, q) \to \max\limits_{\theta} \Leftrightarrow \mathbb{E}{q(T)}\log p(X,T | \theta) \to \max\limits{\theta}$ For GMM, $\theta$ is a set of parameters that consists of mean vectors $\mu_c$, covariance matrices $\Sigma_c$ and priors $\pi_c$ for each component. Latent variables $T$ are indices of components to which each data point is assigned, i.e. $t_i$ is the cluster index for object $x_i$. The joint distribution can be written as follows: $\log p(T, X \mid \theta) = \sum\limits_{i=1}^N \log p(t_i, x_i \mid \theta) = \sum\limits_{i=1}^N \sum\limits_{c=1}^C q(t_i = c) \log \left (\pi_c \, f_{!\mathcal{N}}(x_i \mid \mu_c, \Sigma_c)\right)$, where $f_{!\mathcal{N}}(x \mid \mu_c, \Sigma_c) = \frac{1}{\sqrt{(2\pi)^n|\boldsymbol\Sigma_c|}} \exp\left(-\frac{1}{2}({x}-{\mu_c})^T{\boldsymbol\Sigma_c}^{-1}({x}-{\mu_c}) \right)$ is the probability density function (pdf) of the normal distribution $\mathcal{N}(x_i \mid \mu_c, \Sigma_c)$. E-step In this step we need to estimate the posterior distribution over the latent variables with fixed values of parameters: $q_i(t_i) = p(t_i \mid x_i, \theta)$. We assume that $t_i$ equals to the cluster index of the true component of the $x_i$ object. To do so we need to compute $\gamma_{ic} = p(t_i = c \mid x_i, \theta)$. Note that $\sum\limits_{c=1}^C\gamma_{ic}=1$. <b>Important trick 1:</b> It is important to avoid numerical errors. At some point you will have to compute the formula of the following form: $\frac{e^{y_i}}{\sum_j e^{y_j}}$, which is called softmax. When you compute exponents of large numbers, some numbers may become infinity. 
You can avoid this by dividing numerator and denominator by $e^{\max(y)}$: $\frac{e^{y_i-\max(y)}}{\sum_j e^{y_j - \max(y)}}$. After this transformation maximum value in the denominator will be equal to one. All other terms will contribute smaller values. So, to compute desired formula you first subtract maximum value from each component in vector $\mathbf{y}$ and then compute everything else as before. <b>Important trick 2:</b> You will probably need to compute formula of the form $A^{-1}x$ at some point. You would normally inverse $A$ and then multiply it by $x$. A bit faster and more numerically accurate way to do this is to directly solve equation $Ay = x$ by using a special function. Its solution is $y=A^{-1}x$, but the equation $Ay = x$ can be solved by methods which do not explicitely invert the matrix. You can use np.linalg.solve for this. <b>Other usefull functions: </b> <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.slogdet.html">slogdet</a> and <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.det.html#numpy.linalg.det">det</a> <b>Task 1:</b> Implement E-step for GMM using template below.
def E_step(X, pi, mu, sigma):
    """
    Performs E-step on GMM model
    Each input is numpy array:
    X: (N x d), data points
    pi: (C), mixture component weights
    mu: (C x d), mixture component means
    sigma: (C x d x d), mixture component covariance matrices

    Returns:
    gamma: (N x C), probabilities of clusters for objects
    """
    N = X.shape[0] # number of objects
    C = pi.shape[0] # number of clusters
    d = mu.shape[1] # dimension of each object
    gamma = np.zeros((N, C)) # distribution q(T)

    # quadratic term q_c(x) = 0.5*(x-mu_c)^T sigma_c^{-1} (x-mu_c);
    # np.linalg.solve avoids explicitly inverting sigma, and the row-wise
    # product/sum avoids materialising the NxN matrix that np.diag(dot(...))
    # would have built
    for c in range(C):
        diff = X - mu[c, :]
        gamma[:, c] = 0.5 * np.sum(diff * np.linalg.solve(sigma[c, :, :], diff.T).T, axis=1)
    # softmax stability: the unnormalised log-responsibility is -q_c, so we
    # must subtract the per-row MINIMUM of q (= maximum of -q) before exp.
    # BUG FIX: the original subtracted the per-row maximum of q, which makes
    # the shifted exponents non-negative and can overflow instead of underflow.
    gamma_min = np.amin(gamma, axis=1, keepdims=True)
    gamma = gamma - gamma_min
    sigma_det = np.linalg.det(sigma)
    # BUG FIX: the Gaussian normaliser is (2*pi)^(d/2), not (2*pi)^(N/2).
    # The wrong constant cancelled in the row normalisation below (it is the
    # same for every cluster) but overflows for large N; use the dimension d.
    gamma = pi * np.exp(-gamma) * np.power(np.power(2*np.pi, d)*sigma_det, -0.5)
    # normalise the responsibilities so that each row sums to one
    gamma = gamma/np.sum(gamma, axis=1, keepdims=True)
    return gamma

gamma = E_step(X, pi0, mu0, sigma0)
grader.submit_e_step(gamma)
python/coursera-BayesianML/02_EM_assignment.ipynb
saketkc/notebooks
bsd-2-clause
M-step In M-step we need to maximize $\mathbb{E}{q(T)}\log p(X,T | \theta)$ with respect to $\theta$. In our model this means that we need to find optimal values of $\pi$, $\mu$, $\Sigma$. To do so, you need to compute the derivatives and set them to zero. You should start by deriving formulas for $\mu$ as it is the easiest part. Then move on to $\Sigma$. Here it is crucial to optimize function w.r.t. to $\Lambda = \Sigma^{-1}$ and then inverse obtained result. Finaly, to compute $\pi$, you will need <a href="https://www3.nd.edu/~jstiver/FIN360/Constrained%20Optimization.pdf">Lagrange Multipliers technique</a> to satisfy constraint $\sum\limits{i=1}^{n}\pi_i = 1$. <br> <b>Important note:</b> You will need to compute derivatives of scalars with respect to matrices. To refresh this technique from previous courses, see <a href="https://en.wikipedia.org/wiki/Matrix_calculus"> wiki article</a> about it . Main formulas of matrix derivatives can be found in <a href="http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3274/pdf/imm3274.pdf">Chapter 2 of The Matrix Cookbook</a>. For example, there you may find that $\frac{\partial}{\partial A}\log |A| = A^{-T}$. <b>Task 2:</b> Implement M-step for GMM using template below.
def M_step(X, gamma):
    """
    Performs M-step on GMM model
    Each input is numpy array:
    X: (N x d), data points
    gamma: (N x C), distribution q(T)

    Returns:
    pi: (C)
    mu: (C x d)
    sigma: (C x d x d)
    """
    N, d = X.shape          # number of objects, dimensionality
    C = gamma.shape[1]      # number of clusters

    # effective number of points assigned to each cluster
    cluster_mass = gamma.sum(axis=0)

    # closed-form maximisers of the expected complete-data log likelihood
    pi = cluster_mass / N
    mu = gamma.T.dot(X) / cluster_mass[:, np.newaxis]

    sigma = np.zeros((C, d, d))
    for c in range(C):
        centred = X - mu[c]
        sigma[c] = centred.T.dot(gamma[:, c][:, np.newaxis] * centred) / cluster_mass[c]

    return pi, mu, sigma

gamma = E_step(X, pi0, mu0, sigma0)
pi, mu, sigma = M_step(X, gamma)
grader.submit_m_step(pi, mu, sigma)
python/coursera-BayesianML/02_EM_assignment.ipynb
saketkc/notebooks
bsd-2-clause
Loss function Finally, we need some function to track convergence. We will use variational lower bound $\mathcal{L}$ for this purpose. We will stop our EM iterations when $\mathcal{L}$ will saturate. Usually, you will need only about 10-20 iterations to converge. It is also useful to check that this function never decreases during training. If it does, you have a bug in your code. <b>Task 3:</b> Implement a function that will compute $\mathcal{L}$ using template below. $$\mathcal{L} = \sum_{i=1}^{N} \sum_{c=1}^{C} q(t_i =c) (\log \pi_c + \log f_{!\mathcal{N}}(x_i \mid \mu_c, \Sigma_c)) - \sum_{i=1}^{N} \sum_{c=1}^{K} q(t_i =c) \log q(t_i =c)$$
def compute_vlb(X, pi, mu, sigma, gamma):
    """
    Computes the variational lower bound L(q, theta) for the GMM.

    Each input is numpy array:
    X: (N x d), data points
    gamma: (N x C), distribution q(T)
    pi: (C)
    mu: (C x d)
    sigma: (C x d x d)

    Returns value of variational lower bound
    """
    N = X.shape[0]  # number of objects
    C = gamma.shape[1]  # number of clusters
    d = X.shape[1]  # dimension of each object

    loss = np.zeros(N)
    for c in range(C):
        mvn = multivariate_normal(mu[c, :], sigma[c, :, :], allow_singular=True)
        g = gamma[:, c]
        # Expected complete-data log-likelihood term under q(T).
        loss += g * (np.log(pi[c]) + mvn.logpdf(X))
        # Entropy term -sum q log q.  Guard the q == 0 case: the limit of
        # q*log(q) as q -> 0 is 0, but evaluating np.log(0) directly yields
        # -inf and 0 * (-inf) == nan, which poisons the whole bound.  The
        # inner where() also keeps np.log from emitting a runtime warning.
        safe_g = np.where(g > 0, g, 1.0)
        loss -= np.where(g > 0, g * np.log(safe_g), 0.0)
    return np.sum(loss)

pi, mu, sigma = pi0, mu0, sigma0
gamma = E_step(X, pi, mu, sigma)
pi, mu, sigma = M_step(X, gamma)
loss = compute_vlb(X, pi, mu, sigma, gamma)
grader.submit_VLB(loss)
python/coursera-BayesianML/02_EM_assignment.ipynb
saketkc/notebooks
bsd-2-clause
Bringing it all together Now that we have E step, M step and VLB, we can implement the training loop. We will initialize values of $\pi$, $\mu$ and $\Sigma$ to some random numbers, train until $\mathcal{L}$ stops changing, and return the resulting points. We also know that the EM algorithm converges to local optima. To find a better local optimum, we will restart the algorithm multiple times from different (random) starting positions. Each training trial should stop either when maximum number of iterations is reached or when relative improvement is smaller than given tolerance ($|\frac{\mathcal{L}_i-\mathcal{L}_{i-1}}{\mathcal{L}_{i-1}}| \le \text{rtol}$). Remember, that initial (random) values of $\pi$ that you generate must be non-negative and sum up to 1. Also, $\Sigma$ matrices must be symmetric and positive semi-definite. If you don't know how to generate those matrices, you can use $\Sigma=I$ as initialization. You will also sometimes get numerical errors because of component collapsing. The easiest way to deal with these problems is to restart the procedure. <b>Task 4:</b> Implement training procedure
def train_EM(X, C, rtol=1e-3, max_iter=100, restarts=10):
    '''
    Starts with random initialization *restarts* times
    Runs optimization until saturation with *rtol* reached
    or *max_iter* iterations were made.

    X: (N, d), data points
    C: int, number of clusters

    Returns (best_loss, best_pi, best_mu, best_sigma) from the restart that
    achieved the highest variational lower bound.
    '''
    N = X.shape[0]  # number of objects
    d = X.shape[1]  # dimension of each object

    best_loss = None
    best_pi = None
    best_mu = None
    best_sigma = None

    for _ in range(restarts):
        try:
            # Random initialization.  pi must be non-negative and sum to 1;
            # sigma must be symmetric positive semi-definite, so scaled
            # identity matrices are a safe choice.
            pi = np.full(C, 1.0 / C)
            mu = np.random.randn(C, d)
            sigma = np.zeros((C, d, d))
            for c in range(C):
                sigma[c] = np.eye(d) * np.random.uniform(1, C)

            prev_loss = None
            for i in range(max_iter):
                gamma = E_step(X, pi, mu, sigma)
                pi, mu, sigma = M_step(X, gamma)
                loss = compute_vlb(X, pi, mu, sigma, gamma)
                # BUG FIX: rtol was accepted but never used.  Stop once the
                # relative improvement of the VLB drops below the tolerance,
                # as the docstring promises.
                if prev_loss is not None and abs((loss - prev_loss) / prev_loss) <= rtol:
                    break
                prev_loss = loss

            # BUG FIX: EM *maximizes* the variational lower bound, so the
            # best restart is the one with the LARGEST final loss; the
            # original kept the smallest.
            if best_loss is None or loss > best_loss:
                best_loss = loss
                best_pi = pi
                best_mu = mu
                best_sigma = sigma
        except np.linalg.LinAlgError:
            print("Singular matrix: components collapsed")
            pass

    return best_loss, best_pi, best_mu, best_sigma

best_loss, best_pi, best_mu, best_sigma = train_EM(X, 3)
grader.submit_EM(best_loss)
python/coursera-BayesianML/02_EM_assignment.ipynb
saketkc/notebooks
bsd-2-clause
If you implemented all the steps correctly, your algorithm should converge in about 20 iterations. Let's plot the clusters to see it. We will assign a cluster label as the most probable cluster index. This can be found using a matrix $\gamma$ computed on last E-step.
# Recompute responsibilities with the best parameters found, then assign each
# point to its most probable component.
gamma = E_step(X, best_pi, best_mu, best_sigma)
labels = np.argmax(gamma, axis=1)

# One RGB color per cluster (Tableau palette, normalized to [0, 1]).
colors = np.array([(31, 119, 180), (255, 127, 14), (44, 160, 44)]) / 255.

# Scatter the 2-D points colored by their assigned cluster.
plt.scatter(X[:, 0], X[:, 1], s=30, c=colors[labels])
plt.axis('equal')
plt.show()
python/coursera-BayesianML/02_EM_assignment.ipynb
saketkc/notebooks
bsd-2-clause
Authorization & Submission To submit assignment parts to Cousera platform, please, enter your e-mail and token into variables below. You can generate a token on this programming assignment's page. <b>Note:</b> The token expires 30 minutes after generation.
# Fill these in with the e-mail address you used for Coursera and a token
# generated on the programming assignment's page (tokens expire 30 minutes
# after generation).
STUDENT_EMAIL = ""
STUDENT_TOKEN = ""
# Show which assignment parts have been submitted so far.
grader.status()
python/coursera-BayesianML/02_EM_assignment.ipynb
saketkc/notebooks
bsd-2-clause
问题 5- 偏差与方差之间的权衡取舍 当模型以最大深度 1训练时,模型的预测是出现很大的偏差还是出现了很大的方差?当模型以最大深度10训练时,情形又如何呢?图形中的哪些特征能够支持你的结论? 提示: 你如何得知模型是否出现了偏差很大或者方差很大的问题? 答案:  为1时,出现了很大的偏差,因为此时无论是测试数据还是训练数据b标准系数都很低,测试数据和训练数据的标准系数之间差异很小,说明模型无法对数据进行良好预测。 为 10 时,出现了很大的方差,测试数据和训练数据的标准系数之间差异很大,说明出现了过拟合情况。 问题 6- 最优模型的猜测 你认为最大深度是多少的模型能够最好地对未见过的数据进行预测?你得出这个答案的依据是什么? 答案: 3。因为此时测试数据和训练数据的分数之间差异最小,且测试数据的标准系数达到最高。 评价模型表现 在这个项目的最后,你将自己建立模型,并使用最优化的fit_model函数,基于客户房子的特征来预测该房屋的价值。 问题 7- 网格搜索(Grid Search) 什么是网格搜索法?如何用它来优化学习算法? 回答: 是一种把参数网格化的算法。 它会自动生成一个不同参数值组成的“网格”: =================================== ('param1', param3) | ('param1', param4) ('param2', param3) | ('param2', param4) ================================== 通过尝试所有"网格"中使用的参数,并从中选取最佳的参数组合来优化学习算法。 问题 8- 交叉验证 什么是K折交叉验证法(k-fold cross-validation)?优化模型时,使用这种方法对网格搜索有什么好处?网格搜索是如何结合交叉验证来完成对最佳参数组合的选择的? 提示: 跟为何需要一组测试集的原因差不多,网格搜索时如果不使用交叉验证会有什么问题?GridSearchCV中的'cv_results'属性能告诉我们什么? 答案: K折交叉验证法是将训练数据平均分配到K个容器,每次去其中一个做测试数据,其余做训练数据,进行K次后,对训练结果取平均值的一种获得更高精确度的一种算法。 可以时网格搜索的训练结果获得更高的精确度,如果不使用交叉验证,模型的泛化误差会变大,从而影响网格搜索的效果。 网格搜索可以使拟合函数尝试所有的参数组合,并返回一个合适的分类器,自动调整至最佳参数组合。 练习:训练模型 在最后一个练习中,你将需要将所学到的内容整合,使用决策树演算法训练一个模型。为了保证你得出的是一个最优模型,你需要使用网格搜索法训练模型,以找到最佳的 'max_depth' 参数。你可以把'max_depth' 参数理解为决策树算法在做出预测前,允许其对数据提出问题的数量。决策树是监督学习算法中的一种。 此外,你会发现你的实现使用的是 ShuffleSplit() 。它也是交叉验证的一种方式(见变量 'cv_sets')。虽然这不是问题8中描述的 K-Fold 交叉验证,这个教程验证方法也很有用!这里 ShuffleSplit() 会创造10个('n_splits')混洗过的集合,每个集合中20%('test_size')的数据会被用作验证集。当你在实现的时候,想一想这跟 K-Fold 交叉验证有哪些相同点,哪些不同点? 在下方 fit_model 函数中,你需要做的是: - 使用 sklearn.tree 中的 DecisionTreeRegressor 创建一个决策树的回归函数; - 将这个回归函数储存到 'regressor' 变量中; - 为 'max_depth' 创造一个字典,它的值是从1至10的数组,并储存到 'params' 变量中; - 使用 sklearn.metrics 中的 make_scorer 创建一个评分函数; - 将 performance_metric 作为参数传至这个函数中; - 将评分函数储存到 'scoring_fnc' 变量中; - 使用 sklearn.model_selection 中的 GridSearchCV 创建一个网格搜索对象; - 将变量'regressor', 'params', 'scoring_fnc', 和 'cv_sets' 作为参数传至这个对象中; - 将 GridSearchCV 存到 'grid' 变量中。 如果有同学对python函数如何传递多个参数不熟悉,可以参考这个MIT课程的视频。
# TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV

def fit_model(X, y):
    """ Performs grid search over the 'max_depth' parameter for a 
        decision tree regressor trained on the input data [X, y]. """

    # Cross-validation strategy: 10 shuffled 80/20 train/validation splits,
    # seeded for reproducibility.
    shuffle_cv = ShuffleSplit(n_splits = 10, test_size = 0.20, random_state = 0)

    # The estimator whose hyper-parameter we tune.
    tree = DecisionTreeRegressor(random_state=0)

    # Candidate tree depths 1 through 10.
    depth_grid = {'max_depth': range(1, 11)}

    # Wrap the project's performance metric so GridSearchCV can score with it.
    scorer = make_scorer(performance_metric)

    # Exhaustively evaluate every depth with the scorer and CV splits above,
    # then fit on the data and hand back the best estimator found.
    search = GridSearchCV(tree, depth_grid, scoring=scorer, cv=shuffle_cv)
    search.fit(X, y)
    return search.best_estimator_
Udacity-ML/boston_housing-master_1/boston_housing.ipynb
quoniammm/happy-machine-learning
mit
Answer: 4。与猜测不同,猜测结果为3。 问题 10 - 预测销售价格 想像你是一个在波士顿地区的房屋经纪人,并期待使用此模型以帮助你的客户评估他们想出售的房屋。你已经从你的三个客户收集到以下的资讯: | 特征 | 客戶 1 | 客戶 2 | 客戶 3 | | :---: | :---: | :---: | :---: | | 房屋内房间总数 | 5 间房间 | 4 间房间 | 8 间房间 | | 社区贫困指数(%被认为是贫困阶层) | 17% | 32% | 3% | | 邻近学校的学生-老师比例 | 15:1 | 22:1 | 12:1 | 你会建议每位客户的房屋销售的价格为多少?从房屋特征的数值判断,这样的价格合理吗? 提示:用你在分析数据部分计算出来的统计信息来帮助你证明你的答案。 运行下列的代码区域,使用你优化的模型来为每位客户的房屋价值做出预测。
# Produce a matrix for client data client_data = [[5, 17, 15], # Client 1 [4, 32, 22], # Client 2 [8, 3, 12]] # Client 3 # Show predictions for i, price in enumerate(reg.predict(client_data)): print "Predicted selling price for Client {}'s home: ${:,.2f}".format(i+1, price)
Udacity-ML/boston_housing-master_1/boston_housing.ipynb
quoniammm/happy-machine-learning
mit
答案: 第一个顾客: $403,025.00. 第二个顾客:: $237,478.72. 第三个顾客:: $931,636.36. 这样的价格是合理的,以第三个顾客为例,他的房间数最多,社区贫困指数最低,且教育资源最丰富,因而价格最贵。以此类推,顾客一二的预测也是合理地。 敏感度 一个最优的模型不一定是一个健壮模型。有的时候模型会过于复杂或者过于简单,以致于难以泛化新增添的数据;有的时候模型采用的学习算法并不适用于特定的数据结构;有的时候样本本身可能有太多噪点或样本过少,使得模型无法准确地预测目标变量。这些情况下我们会说模型是欠拟合的。执行下方区域中的代码,采用不同的训练和测试集执行 fit_model 函数10次。注意观察对一个特定的客户来说,预测是如何随训练数据的变化而变化的。
# Run fit_model 10 times on different train/test splits and report how the
# prediction for client_data varies across trials -- a sensitivity check on
# the model's robustness.
vs.PredictTrials(features, prices, fit_model, client_data)
Udacity-ML/boston_housing-master_1/boston_housing.ipynb
quoniammm/happy-machine-learning
mit
Load the lending club dataset
# Load the Lending Club loans dataset into an SFrame and preview the first rows.
loans = graphlab.SFrame('lending-club-data.gl/')
loans.head()
notebooks/binary-decision-tree.ipynb
leon-adams/datascience
mpl-2.0
We reassign the labels to have +1 for a safe loan, and -1 for a risky (bad) loan.
# Recode the target: bad_loans == 0 (safe) -> +1, anything else (risky) -> -1,
# then drop the original column so only 'safe_loans' carries the label.
loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1)
loans = loans.remove_column('bad_loans')
notebooks/binary-decision-tree.ipynb
leon-adams/datascience
mpl-2.0
We will be using 4 categorical features: grade of the loan the length of the loan term the home ownership status: own, mortgage, rent number of years of employment. Since we are building a binary decision tree, we will have to convert these categorical features to a binary representation in a subsequent section using 1-hot encoding.
# The four categorical features used to build the binary decision tree
# (one-hot encoded in a later cell) plus the target column.
features = ['grade',              # grade of the loan
            'term',               # the term of the loan
            'home_ownership',     # home_ownership status: own, mortgage or rent
            'emp_length',         # number of years of employment
           ]
target = 'safe_loans'
# Keep only the selected feature columns plus the target column.
loans = loans[features + [target]]
notebooks/binary-decision-tree.ipynb
leon-adams/datascience
mpl-2.0
Subsample dataset to make sure classes are balanced We will undersample the larger class (safe loans) in order to balance out our dataset. This means we are throwing away many data points. We use seed=1 so everyone gets the same results.
# Split the data by class so the majority class can be undersampled.
safe_loans_raw = loans[loans[target] == 1]
risky_loans_raw = loans[loans[target] == -1]

# Since there are less risky loans than safe loans, find the ratio of the sizes
# and use that percentage to undersample the safe loans.
percentage = len(risky_loans_raw)/float(len(safe_loans_raw))

# seed=1 keeps the subsample reproducible across runs.
safe_loans = safe_loans_raw.sample(percentage, seed = 1)
risky_loans = risky_loans_raw
loans_data = risky_loans.append(safe_loans)

print "Percentage of safe loans :", len(safe_loans) / float(len(loans_data))
print "Percentage of risky loans :", len(risky_loans) / float(len(loans_data))
print "Total number of loans in our new dataset :", len(loans_data)
notebooks/binary-decision-tree.ipynb
leon-adams/datascience
mpl-2.0
Note: There are many approaches for dealing with imbalanced data, including some where we modify the learning algorithm. These approaches are beyond the scope of this presentation, but some of them are reviewed in this paper. Here, we use the simplest possible approach, where we subsample the overly represented class to get a more balanced dataset. In general, and especially when the data is highly imbalanced, we recommend using more advanced methods. Transform categorical data into binary features In this presentation, we will implement binary decision trees (decision trees for binary features, a specific case of categorical variables taking on two values, e.g., true/false). Since all of our features are currently categorical features, we want to turn them into binary features. For instance, the home_ownership feature represents the home ownership status of the loanee, which is either own, mortgage or rent. For example, if a data point has the feature {'home_ownership': 'RENT'} we want to turn this into three features: { 'home_ownership = OWN' : 0, 'home_ownership = MORTGAGE' : 0, 'home_ownership = RENT' : 1 } Since this code requires a few Python and GraphLab tricks, feel free to use this block of code as is. Refer to the API documentation for a deeper understanding.
# Rebuild the balanced dataset, then replace each categorical column with a
# set of 0/1 indicator columns (one-hot encoding).
loans_data = risky_loans.append(safe_loans)
for feature in features:
    # Each value becomes a one-entry dict, e.g. 'RENT' -> {'RENT': 1}, which
    # unpack() expands into a 'home_ownership.RENT'-style indicator column.
    one_hot = loans_data[feature].apply(lambda x: {x: 1})
    unpacked = one_hot.unpack(column_name_prefix=feature)

    # Rows belonging to other categories come back as None; make them
    # explicit zeros so every indicator column is fully 0/1.
    for column in unpacked.column_names():
        unpacked[column] = unpacked[column].fillna(0)

    # Swap the original categorical column for its binary indicators.
    loans_data.remove_column(feature)
    loans_data.add_columns(unpacked)
notebooks/binary-decision-tree.ipynb
leon-adams/datascience
mpl-2.0
Train-test split We split the data into a train test split with 80% of the data in the training set and 20% of the data in the test set. We use seed=1 so that everyone gets the same result.
# 80/20 train/test split; seed=1 for reproducibility.
train_data, test_data = loans_data.random_split(.8, seed=1)
# Sanity checks: distinct label values, total count, and per-class counts
# (the two class counts should add up to the total).
train_data['safe_loans'].unique()
(train_data['safe_loans']).size()
print( (train_data['safe_loans'] == 1).sum())
print( (train_data['safe_loans'] == -1).sum())
print( (train_data['safe_loans'] == 1).sum()+ (train_data['safe_loans'] == -1).sum() )
print((train_data['safe_loans']).size())
notebooks/binary-decision-tree.ipynb
leon-adams/datascience
mpl-2.0
Decision tree implementation In this section, we will implement binary decision trees from scratch. There are several steps involved in building a decision tree. For that reason, we have split the entire assignment into several sections. Function to count number of mistakes while predicting majority class Recall that prediction at an intermediate node works by predicting the majority class for all data points that belong to this node. Now, we will write a function that calculates the number of missclassified examples when predicting the majority class. This will be used to help determine which feature is the best to split on at a given node of the tree. Note: Keep in mind that in order to compute the number of mistakes for a majority classifier, we only need the label (y values) of the data points in the node. Steps to follow : * Step 1: Calculate the number of safe loans and risky loans. * Step 2: Since we are assuming majority class prediction, all the data points that are not in the majority class are considered mistakes. * Step 3: Return the number of mistakes. Now, let us write the function intermediate_node_num_mistakes which computes the number of misclassified examples of an intermediate node given the set of labels (y values) of the data points contained in the node.
def intermediate_node_num_mistakes(labels_in_node):
    """Number of mistakes made by a majority-class prediction on these labels.

    Predicting the majority class misclassifies exactly the minority class,
    so the mistake count is simply the size of the smaller class.  (The
    original implementation materialized a full numpy prediction array and
    compared it element-wise -- equivalent, but O(n) extra memory and work.)

    Assumes labels are strictly +1/-1, which the 'safe_loans' target
    guarantees upstream.
    """
    # Corner case: If labels_in_node is empty, return 0
    if len(labels_in_node) == 0:
        return 0

    # Count the number of 1's (safe loans) and -1's (risky loans).
    number_safe_loans = (labels_in_node == 1).sum()
    number_risky_loans = (labels_in_node == -1).sum()

    # Mistakes of the majority-class prediction == size of the minority class.
    return min(number_safe_loans, number_risky_loans)

example_labels = graphlab.SArray([-1, -1, 1, 1, 1])
intermediate_node_num_mistakes(example_labels)#.to_numyp == 2

example_labels = graphlab.SArray([-1, -1, 1, 1, 1])
(example_labels == 1).to_numpy()
notebooks/binary-decision-tree.ipynb
leon-adams/datascience
mpl-2.0
Function to pick best feature to split on The function best_splitting_feature takes 3 arguments: 1. The data (SFrame of data which includes all of the feature columns and label column) 2. The features to consider for splits (a list of strings of column names to consider for splits) 3. The name of the target/label column (string) The function will loop through the list of possible features, and consider splitting on each of them. It will calculate the classification error of each split and return the feature that had the smallest classification error when split on. Recall that the classification error is defined as follows: $$ \mbox{classification error} = \frac{\mbox{# mistakes}}{\mbox{# total examples}} $$ Follow these steps: * Step 1: Loop over each feature in the feature list * Step 2: Within the loop, split the data into two groups: one group where all of the data has feature value 0 or False (we will call this the left split), and one group where all of the data has feature value 1 or True (we will call this the right split). Make sure the left split corresponds with 0 and the right split corresponds with 1 to ensure your implementation fits with our implementation of the tree building process. * Step 3: Calculate the number of misclassified examples in both groups of data and use the above formula to compute the classification error. * Step 4: If the computed error is smaller than the best error found so far, store this feature and its error. This may seem like a lot, but we have provided pseudocode in the comments in order to help you implement the function correctly. Note: Remember that since we are only dealing with binary features, we do not have to consider thresholds for real-valued features. This makes the implementation of this function much easier.
def best_splitting_feature(data, features, target):
    """Return the binary feature whose 0/1 split minimizes classification error.

    classification error = (# mistakes) / (# total examples), where mistakes
    are counted by majority-class prediction on each side of the split.
    Ties break in favor of the earliest feature in `features` (strict '<').
    """
    best_feature = None     # Keep track of the best feature
    best_error = 10         # Error is always <= 1, so any value > 1 is a safe start

    # Convert to float to make sure error gets computed correctly.
    num_data_points = float(len(data))

    for candidate in features:
        # Partition on the binary feature: value 0 -> left, value 1 -> right.
        left_split = data[data[candidate] == 0]
        right_split = data[data[candidate] == 1]

        # Total mistakes a majority-class prediction makes on both sides.
        mistakes = (intermediate_node_num_mistakes(left_split[target]) +
                    intermediate_node_num_mistakes(right_split[target]))
        error = mistakes / num_data_points

        if error < best_error:
            best_error = error
            best_feature = candidate

    return best_feature
notebooks/binary-decision-tree.ipynb
leon-adams/datascience
mpl-2.0
Building the tree With the above functions implemented correctly, we are now ready to build our decision tree. Each node in the decision tree is represented as a dictionary which contains the following keys and possible values: { 'is_leaf' : True/False. 'prediction' : Prediction at the leaf node. 'left' : (dictionary corresponding to the left tree). 'right' : (dictionary corresponding to the right tree). 'splitting_feature' : The feature that this node splits on. } First, we will write a function that creates a leaf node given a set of target values.
def create_leaf(target_values):
    """Build a leaf node dict predicting the majority class of target_values.

    Ties go to -1, matching the strict '>' comparison of the original.
    """
    # Count how many data points in this node carry each label.
    positives = len(target_values[target_values == +1])
    negatives = len(target_values[target_values == -1])

    # A leaf has no split and no children; it just stores its prediction.
    return {'splitting_feature': None,
            'left': None,
            'right': None,
            'is_leaf': True,
            'prediction': +1 if positives > negatives else -1}
notebooks/binary-decision-tree.ipynb
leon-adams/datascience
mpl-2.0
We have provided a function that learns the decision tree recursively and implements 3 stopping conditions: 1. Stopping condition 1: All data points in a node are from the same class. 2. Stopping condition 2: No more features to split on. 3. Additional stopping condition: In addition to the above two stopping conditions covered in lecture, in this assignment we will also consider a stopping condition based on the max_depth of the tree. By not letting the tree grow too deep, we will save computational effort in the learning process. Now, we will write down the skeleton of the learning algorithm.
def decision_tree_create(data, features, target, current_depth = 0, max_depth = 10): remaining_features = features[:] # Make a copy of the features. target_values = data[target] print "--------------------------------------------------------------------" print "Subtree, depth = %s (%s data points)." % (current_depth, len(target_values)) # Stopping condition 1 # (Check if there are mistakes at current node. if intermediate_node_num_mistakes(target_values) == 0: print "Stopping condition 1 reached." # If no mistakes at current node, make current node a leaf node return create_leaf(target_values) # Stopping condition 2 (check if there are remaining features to consider splitting on) if remaining_features == ['']: print "Stopping condition 2 reached." # If there are no remaining features to consider, make current node a leaf node return create_leaf(target_values) # Additional stopping condition (limit tree depth) if current_depth >= max_depth: print "Reached maximum depth. Stopping for now." # If the max tree depth has been reached, make current node a leaf node return create_leaf(target_values) # Find the best splitting feature (recall the function best_splitting_feature implemented above) splitting_feature = best_splitting_feature(data, features, target) # Split on the best feature that we found. left_split = data[data[splitting_feature] == 0] right_split = data[data[splitting_feature] == 1] remaining_features.remove(splitting_feature) print "Split on feature %s. (%s, %s)" % (\ splitting_feature, len(left_split), len(right_split)) # Create a leaf node if the split is "perfect" if len(left_split) == len(data): print "Creating leaf node left." return create_leaf(left_split[target]) if len(right_split) == len(data): print "Creating leaf node right." 
return create_leaf(right_split[target]) # Repeat (recurse) on left and right subtrees left_tree = decision_tree_create(left_split, remaining_features, target, current_depth + 1, max_depth) right_tree = decision_tree_create(right_split, remaining_features, target, current_depth + 1, max_depth) return {'is_leaf' : False, 'prediction' : None, 'splitting_feature': splitting_feature, 'left' : left_tree, 'right' : right_tree}
notebooks/binary-decision-tree.ipynb
leon-adams/datascience
mpl-2.0