markdown
stringlengths
0
37k
code
stringlengths
1
33.3k
path
stringlengths
8
215
repo_name
stringlengths
6
77
license
stringclasses
15 values
Normalization For the sake of simplicity, we will use the 0-1 range normalization: $ x_i = \dfrac{x_i - min(x_i)}{max(x_i) - min(x_i)}$ This is allowed because we do not have that many 'outliers' in our features. The Alpha-Trimmed normalization or Standard Scaler normalization would be more appropriate if we introduced other (interesting) features such as: - Average cases/week in the neighborhood. - Number of cases in the last X days in that neighborhood.
# Scale every split to the 0-1 range. The scaler is fit on the training
# split only, so validation/test statistics never leak into preprocessing.
from sklearn.preprocessing import MinMaxScaler

normalizer = MinMaxScaler().fit(X_tra)
X_tra = normalizer.transform(X_tra)
X_val = normalizer.transform(X_val)
X_tst = normalizer.transform(X_tst)
notebooks/2016-11-01-dvro-feature-selection.ipynb
dvro/sf-open-data-analysis
mit
Feature Importance Variance Threshold
# Remove near-constant features. The threshold matches a Bernoulli feature
# that takes the same value in more than 99.9% of the samples.
# (Python 2 print-statement syntax.)
from sklearn.feature_selection import VarianceThreshold

print X_tra.shape
threshold=(.999 * (1 - .999))
sel = VarianceThreshold(threshold=threshold)
sel = VarianceThreshold(threshold=threshold)  # fit on training split only
X_tra = sel.fit(X_tra).transform(X_tra)
X_val = sel.transform(X_val)
X_tst = sel.transform(X_tst)
print X_tra.shape

# Recover human-readable names of the kept/dropped columns.
# NOTE(review): removed uses `columns` while selected uses `feature_columns`
# — confirm these two lists are actually the same thing upstream.
removed_features_1 = np.array(columns)[np.where(sel.variances_ < threshold)]
selected_features_1 = np.array(feature_columns)[np.where(sel.variances_ >= threshold)]
print 'removed_features'
print removed_features_1
notebooks/2016-11-01-dvro-feature-selection.ipynb
dvro/sf-open-data-analysis
mit
Correlation
# Pairwise Pearson correlations between all features, shown as a heatmap.
plt.figure(figsize=(12,8))
sns.heatmap(df.corr('pearson'))
notebooks/2016-11-01-dvro-feature-selection.ipynb
dvro/sf-open-data-analysis
mit
features loc_x and loc_y are too correlated with latitude and longitude, respectively; for this reason, we'll delete loc_x and loc_y.
# Drop loc_x/loc_y: they are near-duplicates of latitude/longitude.
del df['loc_x']
del df['loc_y']
notebooks/2016-11-01-dvro-feature-selection.ipynb
dvro/sf-open-data-analysis
mit
Feature Importance (Trees) This can be done using sklearn.feature_selection.SelectFromModel, however, we do it by ourselves in order to get a better visualization of the process.
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier

def feature_importance(X, y, feat_names, forest='random_forest', plot=False, print_=False):
    """Fit a tree ensemble and rank features by impurity importance.

    X, y: training data and labels
    feat_names: names aligned with the columns of X (used for printing)
    forest: 'random_forest' or 'extra_trees'
    plot / print_: optionally show a bar chart / print the ranking

    returns: (indices, importances) with indices sorted by decreasing importance
    """
    # Build a forest and compute the feature importances
    if forest == 'random_forest':
        forest = RandomForestClassifier(n_estimators=200, random_state=0)
    elif forest == 'extra_trees':
        forest = ExtraTreesClassifier(n_estimators=200, random_state=0)
    forest.fit(X, y)
    importances = forest.feature_importances_
    # Per-tree spread of the importances (error bars for the plot).
    sd = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0)
    mn = np.mean([tree.feature_importances_ for tree in forest.estimators_], axis=0)
    indices = np.argsort(importances)[::-1]
    # Print the feature ranking
    if print_:
        print("Feature ranking:")
        for f in range(X.shape[1]):
            print("%d. feature %d (%f) %s" % (f + 1, indices[f], importances[indices[f]], feat_names[indices[f]]))
    if plot:
        plt.figure(figsize=(16,3))
        plt.title("Feature importances")
        plt.bar(range(len(importances)), importances[indices], color="r", yerr=sd[indices], align="center")
        plt.xticks(range(len(importances)), indices)
        plt.xlim([-1, len(indices)])
        plt.show()
    return indices, importances

# Rank features with both forest flavours.
indices, importances = feature_importance(X_tra, y_tra, selected_features_1, plot=True, forest='random_forest')
indices, importances = feature_importance(X_tra, y_tra, selected_features_1, plot=True, forest='extra_trees')

from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score

# Forward selection over the ranked features: validation AUC as a
# function of how many of the top-ranked features are kept.
scores = []
for i in range(1,len(indices)):
    mask = indices[:i]
    clf = RandomForestClassifier(n_estimators=100)
    clf.fit(X_tra[:,mask], y_tra)
    score = roc_auc_score(y_val, clf.predict_proba(X_val[:,mask])[:,1])
    scores.append(score)
plt.plot(np.arange(len(scores)), scores)
plt.xlabel("# Features")
plt.ylabel("AUC")
max_index = np.argmax(scores)
# Chosen by eye from the AUC curve, not necessarily max_index.
sel_index = 18
notebooks/2016-11-01-dvro-feature-selection.ipynb
dvro/sf-open-data-analysis
mit
Based on these results, we'll select the N features
# Keep the names of the top sel_index features by ranked importance.
selected_features_2 = np.array(selected_features_1)[indices[:sel_index]]
selected_features_2
notebooks/2016-11-01-dvro-feature-selection.ipynb
dvro/sf-open-data-analysis
mit
It seems like the location and category features are more important than the date related features. On the date related features, the system also selected opened_dayofweek and opened_dayofmonth. Test Set
from sklearn.metrics import roc_curve, auc

def find_cutoff(y_true, y_pred):
    """Return the ROC threshold whose operating point is closest to tpr == 1 - fpr.

    y_true: binary ground-truth labels
    y_pred: predicted scores/probabilities for the positive class
    """
    fpr, tpr, threshold = roc_curve(y_true, y_pred)
    # Minimise |tpr - (1 - fpr)| directly with numpy. The previous version
    # built a DataFrame and used DataFrame.ix, which was deprecated in
    # pandas 0.20 and removed in pandas 1.0.
    best = np.argmin(np.abs(tpr - (1 - fpr)))
    return threshold[best]

from sklearn.feature_selection import SelectFromModel, SelectKBest
from sklearn.pipeline import Pipeline

def __feature_importance(X, y):
    # Score function for SelectKBest: random-forest impurity importances.
    forest = RandomForestClassifier(n_estimators=200, random_state=0)
    forest.fit(X, y)
    return forest.feature_importances_

# Full preprocessing + classification pipeline, refit on the training split.
pipe = Pipeline([
    ('normalizer', MinMaxScaler()),
    ('selection_threshold', VarianceThreshold(threshold=(.999 * (1 - .999)))),
    ('selection_kbest', SelectKBest(__feature_importance, k=31)),
    ('classifier', RandomForestClassifier(n_estimators=100))])
pipe.fit(X_tra, y_tra)

# Evaluate on the held-out test split.
y_proba = pipe.predict_proba(X_tst)
cutoff = find_cutoff(y_tst, y_proba[:,1])

from sklearn.metrics import roc_curve, auc
fpr, tpr, thresh = roc_curve(y_tst, y_proba[:,1])
auc_roc = auc(fpr, tpr)
# Parenthesized single-argument print works under both Python 2 and 3;
# also fixes the "cuttoff" typo in the message.
print('cutoff {:.4f}'.format(cutoff))

plt.title('ROC Curve')
plt.plot(fpr, tpr, 'b', label='AUC = %0.2f'% auc_roc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.2])
plt.ylim([-0.1,1.2])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()

from sklearn.metrics import classification_report
print(classification_report(y_tst, y_proba[:,1] >= cutoff))

# Persist the processed data for later notebooks.
import sqlite3
from sqlalchemy import create_engine
SQL_ENGINE = create_engine('sqlite:///streetlight_cases.db')
df.to_sql('new_data', SQL_ENGINE, if_exists='replace', index=False)
notebooks/2016-11-01-dvro-feature-selection.ipynb
dvro/sf-open-data-analysis
mit
Read in the list of questions There were 60 questions but due to a copy-paste error, there were some duplicates, so we only have 44 unique questions
# Read in the list of 60 questions AllQuestions = [] with open('questions60.csv', 'r') as csvfile: myreader = csv.reader(csvfile) for row in myreader: # the rstrip() removes blanks AllQuestions.append( row[0].rstrip() ) print('Found', len(AllQuestions), 'questions') questions = list(set(AllQuestions)) print('Found', len(questions), 'unique questions') # As we did for movies, make a dictionary to convert questions to numbers Question2index = {} for index,quest in enumerate( questions ): Question2index[quest] = index # sample usage: print('The question ', questions[40],' has index', Question2index[questions[40]])
notebooks/20Q/loadMechanicalTurkData.ipynb
jamesfolberth/NGC_STEM_camp_AWS
bsd-3-clause
Read in the training data The columns of X correspond to questions, and rows correspond to more data. The rows of y are the movie indices. The values of X are 1, -1 or 0 (see YesNoDict for encoding)
YesNoDict = { "Yes": 1, "No": -1, "Unsure": 0, "": 0 } # load from csv files X = [] y = [] with open('MechanicalTurkResults_149movies_X.csv','r') as csvfile: myreader = csv.reader(csvfile) for row in myreader: X.append( list(map(int,row)) ) with open('MechanicalTurkResults_149movies_y.csv','r') as csvfile: myreader = csv.reader(csvfile) for row in myreader: y = list(map(int,row))
notebooks/20Q/loadMechanicalTurkData.ipynb
jamesfolberth/NGC_STEM_camp_AWS
bsd-3-clause
Use the trained classifier to play a 20 questions game You can see the list of movies we trained on here: https://docs.google.com/spreadsheets/d/1-849aPzi8Su_c5HwwDFERrogXjvSaZFfp_y9MHeO1IA/edit?usp=sharing You may want to use from sklearn.tree import _tree and 'tree.DecisionTreeClassifier' with commands like tree_.children_left[node], tree_.value[node], tree_.feature[node], and `tree_.threshold[node]'.
# up to you
notebooks/20Q/loadMechanicalTurkData.ipynb
jamesfolberth/NGC_STEM_camp_AWS
bsd-3-clause
If you have used the Internet, you have probably seen videos of kittens unrolling toilet paper. And you might have wondered how long it would take a standard kitten to unroll 47 m of paper, the length of a standard roll. The interactions of the kitten and the paper rolls are complex. To keep things simple, let's assume that the kitten pulls down on the free end of the roll with constant force. And let's neglect the friction between the roll and the axle. This diagram shows the paper roll with the force applied by the kitten, $F$, the lever arm of the force around the axis of rotation, $r$, and the resulting torque, $\tau$. Assuming that the force applied by the kitten is 0.002 N, how long would it take to unroll a standard roll of toilet paper? We'll use the same parameters as in Chapter 24:
Rmin = 0.02 # m Rmax = 0.055 # m Mcore = 15e-3 # kg Mroll = 215e-3 # kg L = 47 # m tension = 0.002 # N
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
Rmin and Rmax are the minimum and maximum radius of the roll, respectively. Mcore is the weight of the core (the cardboard tube at the center) and Mroll is the total weight of the paper. L is the unrolled length of the paper. tension is the force the kitten applies by pulling on the loose end of the roll (I chose this value because it yields reasonable results). In Chapter 24 we defined $k$ to be the constant that relates a change in the radius of the roll to a change in the rotation of the roll: $$dr = k~d\theta$$ And we derived the equation for $k$ in terms of $R_{min}$, $R_{max}$, and $L$. $$k = \frac{1}{2L} (R_{max}^2 - R_{min}^2)$$ So we can compute k like this:
# k relates a change in radius to a change in rotation: dr = k * d(theta),
# with k = (Rmax^2 - Rmin^2) / (2 L).
k = (Rmax**2 - Rmin**2) / 2 / L
k
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
Moment of Inertia To compute angular acceleration, we'll need the moment of inertia for the roll. At http://modsimpy.com/moment you can find moments of inertia for simple geometric shapes. I'll model the core as a "thin cylindrical shell", and the paper roll as a "thick-walled cylindrical tube with open ends". The moment of inertia for a thin shell is just $m r^2$, where $m$ is the mass and $r$ is the radius of the shell. For a thick-walled tube the moment of inertia is $$I = \frac{\pi \rho h}{2} (r_2^4 - r_1^4)$$ where $\rho$ is the density of the material, $h$ is the height of the tube (if we think of the roll oriented vertically), $r_2$ is the outer diameter, and $r_1$ is the inner diameter. Since the outer diameter changes as the kitten unrolls the paper, we have to compute the moment of inertia, at each point in time, as a function of the current radius, r, like this:
def moment_of_inertia(r):
    """Moment of inertia for a roll of toilet paper.

    r: current radius of roll in meters

    returns: moment of inertia in kg m**2
    """
    # Core modeled as a thin cylindrical shell: I = m r^2.
    shell = Mcore * Rmin**2
    # Paper modeled as a thick-walled tube with open ends,
    # using the areal density rho_h defined elsewhere in the notebook.
    tube = np.pi * rho_h / 2 * (r**4 - Rmin**4)
    return shell + tube
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
Icore is the moment of inertia of the core; Iroll is the moment of inertia of the paper. rho_h is the density of the paper in terms of mass per unit of area. To compute rho_h, we compute the area of the complete roll like this:
# Cross-sectional area of the paper annulus (m**2).
area = np.pi * (Rmax**2 - Rmin**2)
area
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
And divide the mass of the roll by that area.
# Areal density of the paper: mass per unit of cross-sectional area (kg/m**2).
rho_h = Mroll / area
rho_h
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
As an example, here's the moment of inertia for the complete roll.
# Moment of inertia of the complete (full) roll, r = Rmax.
moment_of_inertia(Rmax)
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
As r decreases, so does I. Here's the moment of inertia when the roll is empty.
# Moment of inertia of the empty roll (bare core), r = Rmin — much smaller.
moment_of_inertia(Rmin)
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
The way $I$ changes over time might be more of a problem than I have made it seem. In the same way that $F = m a$ only applies when $m$ is constant, $\tau = I \alpha$ only applies when $I$ is constant. When $I$ varies, we usually have to use a more general version of Newton's law. However, I believe that in this example, mass and moment of inertia vary together in a way that makes the simple approach work out. A friend of mine who is a physicist is not convinced; nevertheless, let's proceed on the assumption that I am right. Simulation The state variables we'll use are theta, the total rotation of the roll in radians, omega, angular velocity in rad / s, r, the radius of the roll, and y, the length of the unrolled paper. Here's a State object with the initial conditions.
# Initial state: no rotation, at rest, nothing unrolled yet, full radius.
# (State comes from the modsim library.)
init = State(theta=0, omega=0, y=0, r=Rmax)
init
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
And here's a System object with the starting conditions and t_end.
# Bundle the initial conditions with the simulation horizon (seconds).
system = System(init=init, t_end=120)
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
You can take it from here. Exercise: Write a slope function we can use to simulate this system. Test it with the initial conditions. The results should be approximately 0.0, 0.294, 0.0, 0.0
# Solution goes here # Solution goes here
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
Exercise: Write an event function that stops the simulation when y equals L, that is, when the entire roll is unrolled. Test your function with the initial conditions.
# Solution goes here # Solution goes here
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
Now run the simulation.
# Solution goes here
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
And check the results.
# Show the last few rows of the simulation results (final time steps).
results.tail()
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
The final value of theta should be about 200 rotations, the same as in Chapter 24. The final value of omega should be about 63 rad/s, which is about 10 revolutions per second. That's pretty fast, but it might be plausible. The final value of y should be L, which is 47 m. The final value of r should be Rmin, which is 0.02 m. And the total unrolling time should be about 76 seconds, which seems plausible. The following cells plot the results. theta increases slowly at first, then accelerates.
# Total rotation over time: slow at first, then accelerating.
results.theta.plot(color='C0', label='theta')
decorate(xlabel='Time (s)', ylabel='Angle (rad)')
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
Angular velocity, omega, increases almost linearly at first, as constant force yields almost constant torque. Then, as the radius decreases, the lever arm decreases, yielding lower torque, but moment of inertia decreases even more, yielding higher angular acceleration.
# Angular velocity over time (nearly linear at first under constant torque).
results.omega.plot(color='C2', label='omega')
decorate(xlabel='Time (s)', ylabel='Angular velocity (rad/s)')
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
y increases slowly and then accelerates.
# Unrolled length of paper over time.
results.y.plot(color='C1', label='y')
decorate(xlabel='Time (s)', ylabel='Length (m)')
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
r decreases slowly, then accelerates.
# Radius of the roll over time (shrinks faster near the end).
results.r.plot(color='C4', label='r')
decorate(xlabel='Time (s)', ylabel='Radius (m)')
examples/kitten.ipynb
AllenDowney/ModSimPy
mit
Show available processes
# List every process offered by the WPS service (Python 2 print syntax).
for process in wps.processes:
    print process.identifier,":", process.title
wps-cfchecker.ipynb
cehbrecht/demo-notebooks
apache-2.0
Show details about qa_cfchecker process
# Describe the qa_cfchecker process and list its declared inputs.
process = wps.describeprocess(identifier='qa_cfchecker')
for inp in process.dataInputs:
    print inp.identifier, ":", inp.title, ":", inp.dataType
wps-cfchecker.ipynb
cehbrecht/demo-notebooks
apache-2.0
Check file available on http service
# Run the CF checker on a NetCDF file that the service fetches over HTTP.
# NOTE(review): 'async' became a reserved word in Python 3.7, so this call
# is Python 2 only; newer OWSLib versions use a 'mode' argument — confirm
# the OWSLib version before porting.
inputs = [('dataset', 'http://bovec.dkrz.de:8090/wpsoutputs/hummingbird/output-b9855b08-42d8-11e6-b10f-abe4891050e3.nc')]
execution = wps.execute(identifier='qa_cfchecker', inputs=inputs, output='output', async=False)
print execution.status
for out in execution.processOutputs:
    print out.title, out.reference
wps-cfchecker.ipynb
cehbrecht/demo-notebooks
apache-2.0
Prepare local file to send to service To send a local file with the request the file needs to be base64 encoded.
# Send a local NetCDF file inline with the request, base64-encoded.
# NOTE(review): Python 2 code — in Python 3, b64encode needs bytes (open the
# file in 'rb' mode) and 'async=' is a syntax error; confirm target runtime.
from owslib.wps import ComplexDataInput
import base64

fp = open("/home/pingu/tmp/input2.nc", 'r')
text = fp.read()
fp.close()
encoded = base64.b64encode(text)
content = ComplexDataInput(encoded)

inputs = [ ('dataset', content) ]
execution = wps.execute(identifier='qa_cfchecker', inputs=inputs, output='output', async=False)
print execution.status
for out in execution.processOutputs:
    print out.title, out.reference
wps-cfchecker.ipynb
cehbrecht/demo-notebooks
apache-2.0
The figure below illustrates the terminology: <a ><img src = "https://ibm.box.com/shared/static/wsl6jcfld2c3171ob19vjr5chw9gyxrc.png" width = 500, align = "center"></a> <h4 align=center> A labeled function </h4> We can obtain help about a function :
# Show the docstring/signature of the add function defined earlier.
help(add)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
We can call the function:
# Call the function with one input.
add(1)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
If we call the function with a new input we get a new result:
# A new input yields a new result.
add(2)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
We can create different functions. For example, we can create a function that multiplies two numbers. The numbers will be represented by the variables a and b:
def Mult(a, b):
    """Return the product of a and b.

    Works for any operand types that support *, e.g. two numbers,
    or an int and a string (string replication).
    """
    product = a * b
    return product
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
The same function can be used for different data types. For example, we can multiply two integers:
# Multiply two integers.
Mult(2,3)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
Two Floats:
# Multiply an int and a float.
Mult(10,3.14)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
We can even replicate a string by multiplying with an integer:
# Replicate a string by multiplying it with an integer.
Mult(2,"Michael Jackson ")
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
Come up with a function that divides the first input by the second input:
def divide_values(a, b):
    """Return the quotient of the first input divided by the second.

    Propagates ZeroDivisionError when b is 0.
    """
    quotient = a / b
    return quotient
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
<div align="right"> <a href="#q1" class="btn btn-default" data-toggle="collapse">Click here for the solution</a> </div> <div id="q1" class="collapse"> ``` def div(a,b): return(a/b) ``` </div> <h3>Variables </h3> The input to a function is called a formal parameter. A variable that is declared inside a function is called a local variable. The parameter only exists within the function (i.e. the point where the function starts and stops). A variable that is declared outside a function definition is a global variable, and its value is accessible and modifiable throughout the program. We will discuss more about global variables at the end of the lab.
# Function Definition
def square(a):
    """Square the input and add one, printing the result before returning it."""
    # Local variable (only visible inside this function).
    offset = 1
    result = a * a + offset
    print(a, "if you square +1 ", result)
    return result
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
The labels are displayed in the figure: <a ><img src = "https://ibm.box.com/shared/static/gpfa525nnfwxt5rhrvd3o6i8rp2iwsai.png" width = 500, align = "center"></a> <h4 align=center> Figure 2: A function with labeled variables </h4> We can call the function with an input of 3:
#Initializes Global variable x=3 #Makes function call and return function a y z=square(x) z
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
We can call the function with an input of 2 in a different manner:
# Call the function directly with a literal input.
square(2)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
If there is no return statement, the function returns None. The following two functions are equivalent:
def MJ():
    """Print the artist name; returns None implicitly."""
    print('Michael Jackson')

def MJ1():
    """Print the artist name; returns None explicitly — same behavior as MJ."""
    print('Michael Jackson')
    return None

MJ()
MJ1()
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
Printing the function after a call reveals a None is the default return statement:
# Both functions return None; printing the calls makes that visible.
print(MJ())
print(MJ1())
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
Create a function con that concatenates two strings using the addition operation: :
def con(a, b):
    """Combine the two inputs with the + operator.

    Concatenates strings/lists/tuples and adds numbers.
    """
    return a + b
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
<div align="right"> <a href="#q2" class="btn btn-default" data-toggle="collapse">Click here for the solution</a> </div> <div id="q2" class="collapse"> ``` def con(a,b): return(a+b) ``` </div> Can the same function be used to add two integers or strings?
# The same function adds integers and concatenates strings.
print(con(1, 2))
print(con('1', '2'))
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
<div align="right"> <a href="#q3" class="btn btn-default" data-toggle="collapse">Click here for the solution</a> </div> <div id="q3" class="collapse"> ``` yes,for example: con(2,2) ``` </div> Can the same function be used to concatenate a list or tuple?
# Lists concatenate with + as well.
print(con([1], [2]))
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
<div align="right"> <a href="#q4" class="btn btn-default" data-toggle="collapse">Click here for the solution</a> </div> <div id="q4" class="collapse"> ``` yes,for example: con(['a',1],['b',1]) ``` </div> <h3><b>Pre-defined functions</b></h3> There are many pre-defined functions in Python, so let's start with the simple ones. The print() function:
# A list of album ratings; print() displays the whole list.
album_ratings = [10.0,8.5,9.5,7.0,7.0,9.5,9.0,9.5]
print(album_ratings)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
The sum() function adds all the elements in a list or tuple:
# Add up every rating in the list.
sum(album_ratings)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
The length function returns the length of a list or tuple:
# Number of elements in the list.
len(album_ratings)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
<div class="alert alert-success alertsuccess" style="margin-top: 20px"> <h4> [Tip] How do I learn more about the pre-defined functions in Python? </h4> <p></p> We will be introducing a variety of **pre-defined functions** to you as you learn more about Python. There are just too many functions, so there's no way we can teach them all in one sitting. But if you'd like to take a quick peek, here's a short reference card for some of the commonly-used pre-defined functions: http://www.astro.up.pt/~sousasag/Python_For_Astronomers/Python_qr.pdf </div> <h3>Functions Makes Things Simple </h3> Consider the two lines of code in Block 1 and Block 2: the procedure for each block is identical. The only thing that is different is the variable names and values. Block 1:
# Block 1: combine a1 and b1, then snap the result to 0 (negative) or 5.
a1 = 4
b1 = 5
c1 = a1 + b1 + 2 * a1 * b1 - 1
c1 = 0 if c1 < 0 else 5
c1
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
Block 2:
# Block 2: identical procedure to Block 1, different variable values.
a2 = 0
b2 = 0
c2 = a2 + b2 + 2 * a2 * b2 - 1
c2 = 0 if c2 < 0 else 5
c2
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
We can replace the lines of code with a function. A function combines many instructions into a single line of code. Once a function is defined, it can be used repeatedly. You can invoke the same function many times in your program. You can save your function and use it in another program or use someone else’s function. The lines of code in code block 1 and code block 2 can be replaced by the following function:
def Equation(a, b):
    """Combine a and b as a + b + 2ab - 1, then clamp: 0 if negative, else 5."""
    combined = a + b + 2 * a * b - 1
    return 0 if combined < 0 else 5
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
This function takes two inputs, a and b, then applies several operations to return c. We simply define the function, replace the instructions with the function, and input the new values of a1,b1 and a2,b2 as inputs. The entire process is demonstrated in the figure: <a ><img src = "https://ibm.box.com/shared/static/efn4rii75bgytjdb5c8ek6uezch7yaxq.gif" width = 1100, align = "center"></a> <h4 align=center> Example of a function used to replace redundant lines of code </h4> Code Blocks 1 and Block 2 can now be replaced with code Block 3 and code Block 4. Block 3:
# Block 3: same computation as Block 1, now via the Equation function.
a1=4; b1=5;
c1=Equation(a1,b1)
c1
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
Block 4:
# Block 4: same computation as Block 2, now via the Equation function.
a2=0; b2=0;
c2=Equation(a2,b2)
c2
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
<hr> <a id='ref3'></a> <center><h2>Using if/else statements and loops in functions</h2></center> The return() function is particularly useful if you have any IF statements in the function, when you want your output to be dependent on some condition:
def type_of_album(artist, album, year_released):
    """Print the album details, then classify it by release year.

    returns: "Modern" for releases after 1980, otherwise "Oldie".
    """
    print(artist, album, year_released)
    return "Modern" if year_released > 1980 else "Oldie"

x = type_of_album("Michael Jackson", "Thriller", 1980)
print(x)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
We can use a loop in a function. For example, we can print out each element in a list:
def PrintList(the_list):
    """Print each element of the_list on its own line; returns None."""
    for item in the_list:
        print(item)

PrintList(['1',1,'the man',"abc"])
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
<hr> <a id='ref4'></a> <center><h2>Setting default argument values in your custom functions</h2></center> You can set a default value for arguments in your function. For example, in the isGoodRating() function, what if we wanted to create a threshold for what we consider to be a good rating? Perhaps by default, we should have a default rating of 4:
def isGoodRating(rating=4):
    """Print whether an album rating (default 4) clears the goodness bar of 7."""
    verdict_bad = rating < 7
    if verdict_bad:
        print("this album sucks it's rating is", rating)
    else:
        print("this album is good its rating is", rating)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
<hr>
# No argument uses the default rating of 4; the second call passes 10.
isGoodRating()
isGoodRating(10)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
<a id='ref6'></a> <center><h2>Global variables</h2></center> <br> So far, we've been creating variables within functions, but we have not discussed variables outside the function. These are called global variables. <br> Let's try to see what printer1 returns:
# A global variable, passed into the function below.
artist = "Michael Jackson"

def printer1(artist):
    """Bind the argument to a local variable and announce the artist.

    internal_var is local: it does not exist outside this function.
    """
    internal_var = artist
    print(artist, "is an artist")

printer1(artist)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
If we print internal_var we get an error. We got a Name Error: name 'internal_var' is not defined. Why? It's because all the variables we create in the function is a local variable, meaning that the variable assignment does not persist outside the function. But there is a way to create global variables from within a function as follows:
artist = "Michael Jackson"

def printer(artist):
    """Announce the artist and create the GLOBAL variable internal_var.

    The global statement makes the assignment persist outside the function.
    """
    global internal_var
    internal_var = "Whitney Houston"
    print(artist, "is an artist")

printer(artist)
printer(internal_var)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
<a id='ref7'></a> <center><h2>Scope of a Variable</h2></center> <hr> The scope of a variable is the part of that program where that variable is accessible. Variables that are declared outside of all function definitions, such as the myFavouriteBand variable in the code shown here, are accessible from anywhere within the program. As a result, such variables are said to have global scope, and are known as global variables. myFavouriteBand is a global variable, so it is accessible from within the getBandRating function, and we can use it to determine a band's rating. We can also use it outside of the function, such as when we pass it to the print function to display it:
# Global variable: visible both inside and outside getBandRating.
myFavouriteBand = "AC/DC"

def getBandRating(bandname):
    """Rate 10.0 for the (global) favourite band, 0.0 for anyone else."""
    return 10.0 if bandname == myFavouriteBand else 0.0

print("AC/DC's rating is:", getBandRating("AC/DC"))
print("Deep Purple's rating is:", getBandRating("Deep Purple"))
print("My favourite band is:", myFavouriteBand)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
Take a look at this modified version of our code. Now the myFavouriteBand variable is defined within the getBandRating function. A variable that is defined within a function is said to be a local variable of that function. That means that it is only accessible from within the function in which it is defined. Our getBandRating function will still work, because myFavouriteBand is still defined within the function. However, we can no longer print myFavouriteBand outside our function, because it is a local variable of our getBandRating function; it is only defined within the getBandRating function:
def getBandRating(bandname):
    # myFavouriteBand is a LOCAL variable here — it only exists inside
    # this function.
    myFavouriteBand = "AC/DC"
    if bandname == myFavouriteBand:
        return 10.0
    else:
        return 0.0

print("AC/DC's rating is: ", getBandRating("AC/DC"))
print("Deep Purple's rating is: ", getBandRating("Deep Purple"))
# NOTE: the next line raises NameError on purpose — myFavouriteBand is not
# defined in the global scope, which is the point of this example.
print("My favourite band is", myFavouriteBand)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
Finally, take a look at this example. We now have two myFavouriteBand variable definitions. The first one of these has a global scope, and the second of them is a local variable within the getBandRating function. Within the getBandRating function, the local variable takes precedence. Deep Purple will receive a rating of 10.0 when passed to the getBandRating function. However, outside of the getBandRating function, the getBandRating s local variable is not defined, so the myFavouriteBand variable we print is the global variable, which has a value of AC/DC:
# Global definition — used only OUTSIDE the function.
myFavouriteBand = "AC/DC"

def getBandRating(bandname):
    """Rate against the LOCAL favourite, which shadows the global one."""
    myFavouriteBand = "Deep Purple"
    return 10.0 if bandname == myFavouriteBand else 0.0

print("AC/DC's rating is:",getBandRating("AC/DC"))
print("Deep Purple's rating is: ",getBandRating("Deep Purple"))
print("My favourite band is:",myFavouriteBand)
coursera/python_for_data_science/3.3_Functions.ipynb
mohanprasath/Course-Work
gpl-3.0
You might be tempted to encode this data with a straightforward numerical mapping:
# Naive ordinal encoding of the neighborhood names (discarded in favor of
# one-hot encoding, for the reasons discussed below).
{'Queen Anne': 1, 'Fremont': 2, 'Wallingford': 3};
# It turns out that this is not generally a useful approach
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
A fundamental assumption: numerical features reflect algebraic quantities. Queen Anne < Fremont < Wallingford Wallingford - Queen Anne = Fremont It does not make much sense. One-hot encoding (Dummy coding) effectively creates extra columns indicating the presence or absence of a category with a value of 1 or 0, respectively. - When your data comes as a list of dictionaries - Scikit-Learn's DictVectorizer will do this for you:
# One-hot encode the list-of-dicts; sparse=False yields a dense int array.
from sklearn.feature_extraction import DictVectorizer
vec = DictVectorizer(sparse=False, dtype=int )
vec.fit_transform(data)
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
Notice the 'neighborhood' column has been expanded into three separate columns (why not four?) representing the three neighborhood labels, and that each row has a 1 in the column associated with its neighborhood. To see the meaning of each column, you can inspect the feature names:
# Column labels of the encoded matrix.
# NOTE(review): renamed to get_feature_names_out in scikit-learn 1.0+ —
# confirm the pinned version before upgrading.
vec.get_feature_names()
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
There is one clear disadvantage of this approach: - if your category has many possible values, this can greatly increase the size of your dataset. - However, because the encoded data contains mostly zeros, a sparse output can be a very efficient solution:
# Same encoding, but kept as a scipy sparse matrix — memory-efficient
# when the category has many possible values.
vec = DictVectorizer(sparse=True, dtype=int)
vec.fit_transform(data)
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
Many (though not yet all) of the Scikit-Learn estimators accept such sparse inputs when fitting and evaluating models. two additional tools that Scikit-Learn includes to support this type of encoding: - sklearn.preprocessing.OneHotEncoder - sklearn.feature_extraction.FeatureHasher Text Features Another common need in feature engineering is to convert text to a set of representative numerical values. Most automatic mining of social media data relies on some form of encoding the text as numbers. - One of the simplest methods of encoding data is by word counts: - you take each snippet of text, count the occurrences of each word within it, and put the results in a table. For example, consider the following set of three phrases:
# Toy corpus of three short phrases for word-count vectorization.
sample = ['problem of evil', 'evil queen', 'horizon problem']
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
For a vectorization of this data based on word count, we could construct a column representing the word "problem," the word "evil," the word "horizon," and so on. While doing this by hand would be possible, the tedium can be avoided by using Scikit-Learn's CountVectorizer:
from sklearn.feature_extraction.text import CountVectorizer

# Learn the vocabulary and count word occurrences per document;
# the result is a sparse document-term matrix.
vec = CountVectorizer()
X = vec.fit_transform(sample)
X
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
The result is a sparse matrix recording the number of times each word appears; it is easier to inspect if we convert this to a DataFrame with labeled columns:
import pandas as pd

# Label the count matrix with its vocabulary for easy inspection.
# NOTE: get_feature_names() was removed in scikit-learn 1.2; use
# get_feature_names_out() when available, with a fallback for old versions.
columns = (vec.get_feature_names_out()
           if hasattr(vec, 'get_feature_names_out')
           else vec.get_feature_names())
pd.DataFrame(X.toarray(), columns=columns)
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
Problem: The raw word counts put too much weight on words that appear very frequently. term frequency-inverse document frequency (TF–IDF) weights the word counts by a measure of how often they appear in the documents. The syntax for computing these features is similar to the previous example:
from sklearn.feature_extraction.text import TfidfVectorizer

# TF-IDF down-weights words that appear in many documents, so common
# words contribute less than distinctive ones.
vec = TfidfVectorizer()
X = vec.fit_transform(sample)
# NOTE: get_feature_names() was removed in scikit-learn 1.2; prefer
# get_feature_names_out() with a fallback for older versions.
columns = (vec.get_feature_names_out()
           if hasattr(vec, 'get_feature_names_out')
           else vec.get_feature_names())
pd.DataFrame(X.toarray(), columns=columns)
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
For an example of using TF-IDF in a classification problem, see In Depth: Naive Bayes Classification. Image Features The simplest approach is what we used for the digits data in Introducing Scikit-Learn: simply using the pixel values themselves. - But depending on the application, such approaches may not be optimal. - A comprehensive summary of feature extraction techniques for images in the Scikit-Image project. For one example of using Scikit-Learn and Scikit-Image together, see Feature Engineering: Working with Images. Derived Features Another useful type of feature is one that is mathematically derived from some input features. We saw an example of this in Hyperparameters and Model Validation when we constructed polynomial features from our input data. To convert a linear regression into a polynomial regression - not by changing the model - but by transforming the input! - basis function regression, and is explored further in In Depth: Linear Regression. For example, this data clearly cannot be well described by a straight line:
%matplotlib inline import numpy as np import matplotlib.pyplot as plt x = np.array([1, 2, 3, 4, 5]) y = np.array([4, 2, 1, 3, 7]) plt.scatter(x, y);
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
Still, we can fit a line to the data using LinearRegression and get the optimal result:
from sklearn.linear_model import LinearRegression

# scikit-learn expects a 2-D feature matrix (n_samples, n_features),
# so reshape x into a single column.
X = x[:, np.newaxis]
model = LinearRegression()
model.fit(X, y)
yfit = model.predict(X)

plt.scatter(x, y)
plt.plot(x, yfit);
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
We need a more sophisticated model to describe the relationship between $x$ and $y$. - One approach to this is to transform the data, - adding extra columns of features to drive more flexibility in the model. For example, we can add polynomial features to the data this way:
from sklearn.preprocessing import PolynomialFeatures

# Expand X with x^2 and x^3 columns (the constant bias column is omitted
# because LinearRegression fits its own intercept).
poly = PolynomialFeatures(degree=3, include_bias=False)
X2 = poly.fit_transform(X)
print(X2)
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
The derived feature matrix has one column representing $x$, and a second column representing $x^2$, and a third column representing $x^3$. Computing a linear regression on this expanded input gives a much closer fit to our data:
# A linear fit in the expanded feature space is a cubic in the original x.
model = LinearRegression()
model.fit(X2, y)
yfit = model.predict(X2)

plt.scatter(x, y)
plt.plot(x, yfit);
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
This idea of improving a model not by changing the model, but by transforming the inputs, is fundamental to many of the more powerful machine learning methods. We explore this idea further in In Depth: Linear Regression in the context of basis function regression. More generally, this is one motivational path to the powerful set of techniques known as kernel methods, which we will explore in In-Depth: Support Vector Machines. Imputation of Missing Data Another common need in feature engineering is handling of missing data. Handling Missing Data NaN value is used to mark missing values. For example, we might have a dataset that looks like this:
from numpy import nan

# A toy feature matrix with two missing entries (NaN) and its targets.
X = np.array([
    [nan, 0,   3],
    [3,   7,   9],
    [3,   5,   2],
    [4,   nan, 6],
    [8,   8,   1],
])
y = np.array([14, 16, -1, 8, -5])
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
When applying a typical machine learning model to such data, we will need to first replace such missing data with some appropriate fill value. This is known as imputation of missing values - simple method, e.g., replacing missing values with the mean of the column - sophisticated method, e.g., using matrix completion or a robust model to handle such data - It tends to be very application-specific, and we won't dive into them here. For a baseline imputation approach, using the mean, median, or most frequent value, Scikit-Learn provides the Imputer class:
# sklearn.preprocessing.Imputer was removed in scikit-learn 0.22;
# sklearn.impute.SimpleImputer is the drop-in replacement. Keep a
# fallback so the cell still runs on very old scikit-learn.
try:
    from sklearn.impute import SimpleImputer as Imputer
except ImportError:
    from sklearn.preprocessing import Imputer

# Replace each NaN with the mean of its column.
imp = Imputer(strategy='mean')
X2 = imp.fit_transform(X)
X2
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
We see that in the resulting data, the two missing values have been replaced with the mean of the remaining values in the column. This imputed data can then be fed directly into, for example, a LinearRegression estimator:
# The imputed matrix can now be fed to any estimator like complete data.
model = LinearRegression()
model.fit(X2, y)
model.predict(X2)
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
Feature Pipelines With any of the preceding examples, it can quickly become tedious to do the transformations by hand, especially if you wish to string together multiple steps. For example, we might want a processing pipeline that looks something like this: Impute missing values using the mean Transform features to quadratic Fit a linear regression To streamline this type of processing pipeline, Scikit-Learn provides a Pipeline object, which can be used as follows:
from sklearn.pipeline import make_pipeline

# Imputer was removed in scikit-learn 0.22; SimpleImputer is the
# drop-in replacement (fallback kept for very old versions).
try:
    from sklearn.impute import SimpleImputer as Imputer
except ImportError:
    from sklearn.preprocessing import Imputer

# Pipeline: impute missing values -> expand to quadratic features
# -> fit a linear regression.
model = make_pipeline(Imputer(strategy='mean'),
                      PolynomialFeatures(degree=2),
                      LinearRegression())
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
This pipeline looks and acts like a standard Scikit-Learn object, and will apply all the specified steps to any input data.
# Fit the whole pipeline in one call; the imputation step handles the
# NaNs still present in X.
model.fit(X, y)  # X with missing values, from above
print(y)
print(model.predict(X))
code/09.04-Feature-Engineering.ipynb
computational-class/cjc2016
mit
Use the following function to plot out the parameters of the Softmax function:
def PlotParameters(model):
    """Visualize the weight row of each of the 10 output classes as a
    28x28 image, on a shared symmetric color scale.

    Assumes ``model`` has a ``linear`` layer whose weight matrix has one
    784-long row per class — TODO confirm against the model definition.
    """
    weights = model.state_dict()['linear.weight'].data
    w_min = weights.min().item()
    w_max = weights.max().item()

    fig, axes = plt.subplots(2, 5)
    fig.subplots_adjust(hspace=0.01, wspace=0.1)

    for index, ax in enumerate(axes.flat):
        if index < 10:
            # Label each sub-plot with its digit class.
            ax.set_xlabel("class: {0}".format(index))
            # 'seismic' makes positive/negative weights visually distinct;
            # shared vmin/vmax keeps the color scale comparable across classes.
            ax.imshow(weights[index, :].view(28, 28),
                      vmin=w_min, vmax=w_max, cmap='seismic')
            ax.set_xticks([])
            ax.set_yticks([])

    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
Use the following function to visualize the data:
def show_data(data_sample):
    """Display an MNIST sample (image tensor, label) as a grayscale image,
    titled with its label value."""
    image, label = data_sample[0], data_sample[1]
    plt.imshow(image.numpy().reshape(28, 28), cmap='gray')
    plt.title('y= ' + str(label.item()))
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
<a id="ref1"></a> <h2 align=center>Prepare Data </h2> Load the training dataset by setting the parameters <code>train</code> to <code>True</code> and convert it to a tensor by placing a transform object in the argument <code>transform</code>.
# Download (if not cached) the MNIST training split and convert each
# image to a tensor on access.
train_dataset = dsets.MNIST(root='./data',
                            train=True,
                            download=True,
                            transform=transforms.ToTensor())
train_dataset
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
Load the testing dataset by setting the parameter <code>train</code> to <code>False</code> and convert it to a tensor by placing a transform object in the argument <code>transform</code>.
# Download (if not cached) the MNIST test split for validation.
validation_dataset = dsets.MNIST(root='./data',
                                 train=False,
                                 download=True,
                                 transform=transforms.ToTensor())
validation_dataset
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
You can see that the data type is long:
# Inspect the tensor type of the first training label (Long, per the
# surrounding text).
train_dataset[0][1].type()
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
Data Visualization Each element in the rectangular tensor corresponds to a number that represents a pixel intensity as demonstrated by the following image: <img src = "https://ibm.box.com/shared/static/7024mnculm8w2oh0080y71cpa48cib2k.png" width = 550, align = "center"></a> Print out the third label:
# Display the label of the training sample at index 3.
train_dataset[3][1]
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
Plot the 3rd sample:
# Plot the training sample at index 3 as an image.
show_data(train_dataset[3])
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
You see that it is a 1. Now, plot the second sample:
# Plot the training sample at index 2 as an image.
show_data(train_dataset[2])
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
<a id="ref3"></a> Build a Softmax Classifier Build a Softmax classifier class:
class SoftMax(nn.Module):
    """Softmax classifier: a single linear layer producing raw class
    scores (logits). The softmax itself is applied by the loss
    (CrossEntropyLoss), not by this module."""

    def __init__(self, input_size, output_size):
        super(SoftMax, self).__init__()
        self.linear = nn.Linear(input_size, output_size)

    def forward(self, x):
        # Return the unnormalized class scores.
        return self.linear(x)
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
The Softmax function requires vector inputs. Note that the vector shape is 28x28.
# Shape of one raw image tensor, before flattening for the linear layer.
train_dataset[0][0].shape
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
Flatten the tensor as shown in this image: <img src = "https://ibm.box.com/shared/static/0cjl5inks3d8ay0sckgywowc3hw2j1sa.gif" width = 550, align = "center"></a> The size of the tensor is now 784. <img src = "https://ibm.box.com/shared/static/lhezcvgm82gtdewooueopxp98ztq2pbv.png" width = 550, align = "center"></a> Set the input size and output size:
# Flattened 28x28 image -> 784 inputs; 10 digit classes -> 10 outputs.
input_dim = 28 * 28
output_dim = 10
input_dim
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
<a id="ref3"></a> <h2> Define the Softmax Classifier, Criterion Function, Optimizer, and Train the Model</h2>
# Instantiate the classifier: 784 inputs -> 10 class scores.
model = SoftMax(input_dim, output_dim)
model
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
View the size of the model parameters:
# W holds one 784-long weight row per class; b holds one bias per class.
params = list(model.parameters())
print('W:', params[0].size())
print('b', params[1].size())
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
You can convert the model parameters for each class into a rectangular grid: <a> <img src = "https://ibm.box.com/shared/static/9cuuwsvhwygbgoogmg464oht1o8ubkg2.gif" width = 550, align = "center"></a> Plot the model parameters for each class:
# Plot the current model parameters for each class.
PlotParameters(model)
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
Loss function:
# Cross-entropy loss: applies log-softmax to the logits internally,
# which is why the model's forward pass returns raw scores.
criterion=nn.CrossEntropyLoss()
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
Optimizer class:
# Plain stochastic gradient descent with a fixed step size of 0.1.
learning_rate=0.1
optimizer=torch.optim.SGD(model.parameters(), lr=learning_rate)
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
Define the dataset loader:
# Mini-batches of 100 for training; large batches of 5000 for validation
# (no gradients are needed there, so bigger batches are fine).
train_loader=torch.utils.data.DataLoader(dataset=train_dataset,batch_size=100)
validation_loader=torch.utils.data.DataLoader(dataset=validation_dataset,batch_size=5000)
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
Train the model and determine validation accuracy (should take a few minutes):
n_epochs = 10
loss_list = []
accuracy_list = []
N_test = len(validation_dataset)

# Train for n_epochs, evaluating validation accuracy after each epoch.
for epoch in range(n_epochs):
    for x, y in train_loader:
        # clear gradients from the previous step
        optimizer.zero_grad()
        # forward pass: flatten each 28x28 image into a 784-vector
        z = model(x.view(-1, 28 * 28))
        # calculate the loss
        loss = criterion(z, y)
        # calculate gradients of parameters
        loss.backward()
        # update parameters
        optimizer.step()

    # perform a prediction on the validation data
    correct = 0
    for x_test, y_test in validation_loader:
        z = model(x_test.view(-1, 28 * 28))
        _, yhat = torch.max(z.data, 1)
        correct += (yhat == y_test).sum().item()
    accuracy = correct / N_test

    # BUG FIX: the original appended `accuracy` twice per epoch, so
    # accuracy_list grew twice as fast as loss_list and the two curves
    # were misaligned in the plot below. Append each metric exactly once.
    loss_list.append(loss.data)
    accuracy_list.append(accuracy)
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0
<a id="ref3"></a> <h2 align=center>Analyze Results</h2> Plot the loss and accuracy on the validation data:
fig, ax1 = plt.subplots()

# Loss (red) on the left axis.
color = 'tab:red'
ax1.plot(loss_list, color=color)
ax1.set_xlabel('epoch', color=color)
ax1.set_ylabel('total loss', color=color)
# FIX: use labelcolor (as the second axis already does) — `color`
# tints the tick marks, not the tick labels.
ax1.tick_params(axis='y', labelcolor=color)

# Accuracy (blue) on a twin axis sharing the same x.
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('accuracy', color=color)
ax2.plot(accuracy_list, color=color)
ax2.tick_params(axis='y', labelcolor=color)

fig.tight_layout()
DL0110EN/3.3.2lab_predicting _MNIST_using_Softmax.ipynb
atlury/deep-opencl
lgpl-3.0