kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
10,598,995
X = pd.DataFrame(data=X, columns=tfidf.get_feature_names() )<drop_column>
sc = StandardScaler() features = train[['Pclass','Sex','Name','Fare']].values features = sc.fit_transform(features) target = train['Survived'].values
Titanic - Machine Learning from Disaster
10,598,995
df = df.drop(columns=['text', 'length'], axis=1 )<create_dataframe>
x_train, x_test, y_train, y_test = train_test_split(features, target, test_size = 0.25, random_state = 2 )
Titanic - Machine Learning from Disaster
10,598,995
X = pd.DataFrame(np.hstack(( df, X)) )<rename_columns>
def grid_search_cv(classifier,hyperparameters): GSCV = GridSearchCV(classifier,hyperparameters,cv = 10) model = GSCV.fit(x_test,y_test) print(model) print('HIGHEST ACCURACY : ','{0:.2%}'.format(model.best_score_)) print('BEST PARAMETERS : ',model.best_params_ )
Titanic - Machine Learning from Disaster
10,598,995
X.columns = ['sentence_id', 'president_id'] + tfidf.get_feature_names()<filter>
from sklearn.linear_model import LogisticRegression
Titanic - Machine Learning from Disaster
10,598,995
train = X[pd.isnull(X['sentence_id'])] test = X[pd.notnull(X['sentence_id'])]<drop_column>
classifier_lr = LogisticRegression(random_state = 0,C=10,penalty= 'l2' )
Titanic - Machine Learning from Disaster
10,598,995
X_train = train.drop(['sentence_id', 'president_id'], axis=1) X_test = test.drop(['sentence_id', 'president_id'], axis=1 )<categorify>
model_evaluation(classifier_lr )
Titanic - Machine Learning from Disaster
10,598,995
def one_hot_encode(label): vec = [0, 0, 0, 0, 0, 0] vec[label] = 1 return vec y_train = np.vstack(row for row in train['president_id'].apply(one_hot_encode ).values )<choose_model_class>
from sklearn.svm import SVC
Titanic - Machine Learning from Disaster
10,598,995
def create_model(lyrs=[X_train.shape[1], 1028, 512, 256], act='relu', opt='Adam', dr=0.25): model = Sequential() model.add(Dense(lyrs[0], input_dim=X_train.shape[1], activation=act)) for i in range(1,len(lyrs)) : model.add(Dense(lyrs[i], activation=act)) model.add(Dropout(dr)) model.add(Dense(6, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) return model<train_model>
classifier_svc = SVC(kernel = 'linear',C = 0.1 )
Titanic - Machine Learning from Disaster
10,598,995
training = model.fit(X_train, y_train, epochs=5, batch_size=64, validation_split=0.2) val_acc = np.mean(training.history['val_acc']) print(" %s: %.2f%%" %('val_acc', val_acc*100))<predict_on_test>
hyperparameters = {'C' : [0.01,0.1,1,10,100]} grid_search_cv(classifier_svc,hyperparameters )
Titanic - Machine Learning from Disaster
10,598,995
predictions = model.predict(X_test )<define_variables>
model_evaluation(classifier_svc )
Titanic - Machine Learning from Disaster
10,598,995
pred_lbls = [] for pred in predictions: pred = list(pred) max_value = max(pred) max_index = pred.index(max_value) pred_lbls.append(max_index) predictions = np.array(pred_lbls )<feature_engineering>
from sklearn.tree import DecisionTreeClassifier
Titanic - Machine Learning from Disaster
10,598,995
test['president_id'] = predictions<count_values>
classifier_dt = DecisionTreeClassifier(criterion = 'entropy' )
Titanic - Machine Learning from Disaster
10,598,995
test['president_id'].value_counts()<save_to_csv>
model_evaluation(classifier_dt )
Titanic - Machine Learning from Disaster
10,598,995
submission = test[['sentence_id','president_id']] submission.columns = ['sentence', 'president'] submission.to_csv('rnn_1.csv', index=False )<count_values>
from sklearn.ensemble import RandomForestClassifier
Titanic - Machine Learning from Disaster
10,598,995
submission.president.value_counts()<import_modules>
classifier_rf = RandomForestClassifier(max_depth = 2,random_state = 0 )
Titanic - Machine Learning from Disaster
10,598,995
%matplotlib inline sns.set_style("whitegrid") <load_from_csv>
model_evaluation(classifier_rf )
Titanic - Machine Learning from Disaster
10,598,995
train_data = pd.read_csv('.. /input/train.csv' )<feature_engineering>
from sklearn.neighbors import KNeighborsClassifier
Titanic - Machine Learning from Disaster
10,598,995
train_data['president'] = train_data['president'].replace('deKlerk',0) train_data['president'] = train_data['president'].replace('Mandela',1) train_data['president'] = train_data['president'].replace('Mbeki',2) train_data['president'] = train_data['president'].replace('Motlanthe',3) train_data['president'] = train_data['president'].replace('Zuma',4) train_data['president'] = train_data['president'].replace('Ramaphosa',5 )<prepare_output>
from sklearn.neighbors import KNeighborsClassifier
Titanic - Machine Learning from Disaster
10,598,995
faxis = train_data.copy() faxis.head()<create_dataframe>
from sklearn.neighbors import KNeighborsClassifier
Titanic - Machine Learning from Disaster
10,598,995
train_donut = [] for i, row in faxis.iterrows() : for text in row['text'].split('.'): train_donut.append([row['president'], text]) train_donut = pd.DataFrame(train_donut, columns=['president', 'text']) train_donut<feature_engineering>
classifier_knn = KNeighborsClassifier(leaf_size = 7, n_neighbors = 3,p = 1 )
Titanic - Machine Learning from Disaster
10,598,995
train_donut['text'] = train_donut['text'].str.lower() train_donut.head()<feature_engineering>
leaf_size = list(range(1,50)) n_neighbors = list(range(1,30)) p=[1,2] hyperparameters = dict(leaf_size=leaf_size, n_neighbors=n_neighbors, p=p) grid_search_cv(classifier_knn,hyperparameters )
Titanic - Machine Learning from Disaster
10,598,995
def remove_punctuation(text): punc = ''.join([l for l in text if l not in string.punctuation]) return punc train_donut['text'] = train_donut['text'].apply(remove_punctuation )<feature_engineering>
model(classifier_knn )
Titanic - Machine Learning from Disaster
10,598,995
train_donut['text'] = train_donut['text'].apply(lambda x: x.replace('”','')) train_donut['text'] = train_donut['text'].apply(lambda x: x.replace('“','')) train_donut['text'] = train_donut['text'].apply(lambda x: x.replace('‘','')) train_donut['text'] = train_donut['text'].apply(lambda x: x.replace('ê','e')) train_donut['text'] = train_donut['text'].apply(lambda x: x.replace('�',''))<categorify>
model_evaluation(classifier_knn )
Titanic - Machine Learning from Disaster
10,598,995
train_donut['text'].replace(' ', np.nan, inplace=True) train_donut['text'].replace('', np.nan, inplace=True) train_donut.dropna(subset=['text'], inplace=True) train_donut <feature_engineering>
models = {'MODELS':['LOGISTIC REGRESSION','SUPPORT VECTOR CLASSIFIER','DECISION TREE CLASSIFIER','RANDOM FOREST CLASSIFIER','K-NEAREST NEIGHBORS'], 'CROSS VAL ACCURACY(%)':[78.73,79.19,81.59,78.29,81.10]}
Titanic - Machine Learning from Disaster
10,598,995
tokeniser = TreebankWordTokenizer() train_donut['tokens'] = train_donut['text'].apply(tokeniser.tokenize) train_donut.head()<choose_model_class>
cross_val = pd.DataFrame(models) cross_val.head()
Titanic - Machine Learning from Disaster
10,598,995
lemmatizer = WordNetLemmatizer()<categorify>
test = pd.read_csv('.. /input/titanic/test.csv') test.head()
Titanic - Machine Learning from Disaster
10,598,995
def donut_lemma(words, lemmatizer): lemma = [lemmatizer.lemmatize(word)for word in words] return lemma<feature_engineering>
passenger_id = test['PassengerId'] test = test.drop(columns = ['PassengerId','Age','SibSp','Parch','Ticket','Cabin','Embarked']) test.head()
Titanic - Machine Learning from Disaster
10,598,995
train_donut['lemma'] = train_donut['text'].apply(donut_lemma, args=(lemmatizer,)) train_donut.head(12 )<train_model>
test.isnull().sum()
Titanic - Machine Learning from Disaster
10,598,995
nltk.download('stopwords') stop = stopwords.words('english') vect = CountVectorizer(stop_words=stop, min_df=2, max_df=0.5, ngram_range=(1, 2)) vect.fit(train_donut['text'] )<prepare_x_and_y>
test.isnull().sum()
Titanic - Machine Learning from Disaster
10,598,995
X = vect.fit_transform(train_donut['text']) X = X.toarray() Y = train_donut['president']<split>
test['Fare'] = test['Fare'].fillna(7.75 )
Titanic - Machine Learning from Disaster
10,598,995
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.001, random_state=42) print('X_train', X_train.shape) print('X_test', X_test.shape) print('y_train', y_train.shape) print('y_test', y_test.shape )<compute_train_metric>
get_initials(test,'Name') test['Name'] = le.fit_transform(test['Name']) test['Sex'] = le.fit_transform(test['Sex']) test.head()
Titanic - Machine Learning from Disaster
10,598,995
lrc = LogisticRegression(random_state = 0) lrc.fit(X_train, y_train) print("TRAINING SET") print("Accuracy: ", lrc.score(X_train, y_train)) print("Confusion Matrix:") print(confusion_matrix(y_train, lrc.predict(X_train))) print("Logistic Regression Classification Report:") print(classification_report(y_train, lrc.predict(X_train))) print("") print("TEST SET") print("Accuracy: ", lrc.score(X_test, y_test)) print("Confusion Matrix:") print(confusion_matrix(y_test, lrc.predict(X_test))) print("Logistic Regression Classification Report:") print(classification_report(y_test, lrc.predict(X_test))) <load_from_csv>
test = sc.fit_transform(test )
Titanic - Machine Learning from Disaster
10,598,995
pres_speech = pd.read_csv('.. /input/test.csv' )<feature_engineering>
prediction = classifier_knn.predict(test) submission = pd.DataFrame({'PassengerId':passenger_id,'Survived':prediction}) submission.to_csv('TITANIC_SUBMISSION.csv',index = False) submission
Titanic - Machine Learning from Disaster
10,598,995
pres_speech['text'] = pres_speech['text'].str.lower() def remove_punctuation(text): punc = ''.join([l for l in text if l not in string.punctuation]) return punc pres_speech['text'] = pres_speech['text'].apply(remove_punctuation) pres_speech['text'] = pres_speech['text'].apply(lambda x: x.replace('”','')) pres_speech['text'] = pres_speech['text'].apply(lambda x: x.replace('“','')) pres_speech['text'] = pres_speech['text'].apply(lambda x: x.replace('‘','')) pres_speech['text'] = pres_speech['text'].apply(lambda x: x.replace('ê','e')) pres_speech['text'] = pres_speech['text'].apply(lambda x: x.replace('�','')) pres_speech['text'].replace(' ', np.nan, inplace=True) pres_speech['text'].replace('', np.nan, inplace=True) pres_speech.dropna(subset=['text'], inplace=True) pres_speech<prepare_x_and_y>
%matplotlib inline pd.options.display.float_format = '{:.2f}'.format
Titanic - Machine Learning from Disaster
10,598,995
X_Test = vect.transform(pres_speech['text']) X_Test = X_Test.toarray() Y = pres_speech['sentence']<prepare_x_and_y>
train = pd.read_csv('.. /input/titanic/train.csv') train.head()
Titanic - Machine Learning from Disaster
10,598,995
Xt_Train = vect.transform(train_donut['text']) Xt_Train = Xt_Train.toarray() Y_train = train_donut['president']<predict_on_test>
train.isnull().sum()
Titanic - Machine Learning from Disaster
10,598,995
Pred_Train = lrc.predict(Xt_Train )<prepare_output>
train.isnull().sum()
Titanic - Machine Learning from Disaster
10,598,995
Train_Submit = pd.DataFrame( { 'President': Y_train, 'Pred_president': Pred_Train, }) Train_Submit.head(10 )<predict_on_test>
sur = train['Survived'].value_counts() survival_rate = [sur[0]/len(train)*100,sur[1]/len(train)*100]
Titanic - Machine Learning from Disaster
10,598,995
Pred_Pres = lrc.predict(X_Test )<create_dataframe>
survived_male = len(train[(train['Sex'] == 'male')&(train['Survived'] == 1)])/len(train[train['Sex'] == 'male'])*100 survived_female = len(train[(train['Sex'] == 'female')&(train['Survived'] == 1)])/len(train[train['Sex'] == 'female'])*100 male = [survived_male,100-survived_male] female = [survived_female,100-survived_female]
Titanic - Machine Learning from Disaster
10,598,995
Test_Submit = pd.DataFrame( { 'sentence': Y, 'president': Pred_Pres, }) <drop_column>
survived_class_1 = len(train[(train['Pclass'] == 1)& train['Survived'] == 1])/len(train[(train['Pclass'] == 1)])*100 survived_class_2 = len(train[(train['Pclass'] == 2)& train['Survived'] == 1])/len(train[(train['Pclass'] == 2)])*100 survived_class_3 = len(train[(train['Pclass'] == 3)& train['Survived'] == 1])/len(train[(train['Pclass'] == 3)])*100 class_1 = [survived_class_1,100-survived_class_1] class_2 = [survived_class_2,100-survived_class_2] class_3 = [survived_class_3,100-survived_class_3]
Titanic - Machine Learning from Disaster
10,598,995
Test_Submit = Test_Submit.reset_index(drop=True )<save_to_csv>
survived_S = len(train[(train['Embarked'] == 'S')&(train['Survived'] == 1)])/len(train[train['Embarked'] == 'S'])*100 survived_C = len(train[(train['Embarked'] == 'C')&(train['Survived'] == 1)])/len(train[train['Embarked'] == 'C'])*100 survived_Q = len(train[(train['Embarked'] == 'Q')&(train['Survived'] == 1)])/len(train[train['Embarked'] == 'Q'])*100 S = [survived_S,100-survived_S] C = [survived_C,100-survived_C] Q = [survived_Q,100-survived_Q]
Titanic - Machine Learning from Disaster
10,598,995
Test_Submit.to_csv('test_data_submission_donut_squad.csv',index=False )<import_modules>
train['Age_Mode'] = train['Age'].fillna(value = 24) train['Age_Med'] = train['Age'].fillna(train['Age'].median()) train['Age_Mean'] = train['Age'].fillna(train['Age'].mean() )
Titanic - Machine Learning from Disaster
10,598,995
import pandas as pd import sklearn import os import numpy as np import matplotlib.pyplot as plt from sklearn.neighbors import KNeighborsRegressor from sklearn.model_selection import cross_val_score from sklearn.metrics import mean_squared_log_error from sklearn import linear_model from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import AdaBoostRegressor <load_from_csv>
train['Age'] = train['Age'].fillna(train['Age'].median()) train = train.drop(columns= ['Age_Mode','Age_Med','Age_Mean']) train.head()
Titanic - Machine Learning from Disaster
10,598,995
train_data = pd.read_csv('.. /input/train.csv', engine='python') test_data = pd.read_csv('.. /input/test.csv', engine='python' )<sort_values>
def get_age_group(dataframe,column_name): dataframe[column_name] = dataframe[column_name].apply(np.ceil) age_group = {0:list(range(0,21)) ,1:list(range(21,41)) ,2:list(range(41,61)) ,3:list(range(61,81)) } col = list(dataframe.columns) index = col.index(column_name) age = [] for j in range(len(dataframe)) : for k in age_group.keys() : for i in range(len(age_group[k])) : if(age_group[k][i] == dataframe.iloc[j,index]): age.append(k) dataframe['Age_Group'] = age
Titanic - Machine Learning from Disaster
10,598,995
with pd.option_context('display.max_rows', None, 'display.max_columns', None): corr_matrix = abs(train_data.corr()) display(corr_matrix['median_house_value'].sort_values(ascending=False))<prepare_x_and_y>
def get_initials(dataframe,column_name): sub = [] initials = ['Mrs.','Ms.','Mr.','Miss.','Master.','Lady.','Don.','Rev.','Dr.','Mme.','Major.','Sir.','Mlle.','Col.','Capt.','Countess.','Jonkheer.','Dona.'] name = dataframe[column_name] for i in range(len(name)) : split_names = name[i].split() for j in range(len(split_names)) : if(split_names[j] in initials): sub.append(split_names[j]) dataframe[column_name] = sub
Titanic - Machine Learning from Disaster
10,598,995
Xtrain = train_data Xtrain = Xtrain.drop('Id', axis=1) Xtrain = Xtrain.drop('median_house_value', axis=1) Ytrain = train_data['median_house_value']<compute_test_metric>
get_initials(train,'Name') train['Name'].value_counts()
Titanic - Machine Learning from Disaster
10,598,995
def rmsle(real, predicted): sum=0.0 for x in range(len(predicted)) : if predicted[x]<0 or real[x]<0: continue p = np.log(predicted[x]+1) r = np.log(real[x]+1) sum = sum +(p - r)**2 return(( sum/len(predicted)) **0.5 )<find_best_model_class>
le = LabelEncoder() train['Name'] = le.fit_transform(train['Name']) encoded_values = train['Name'].unique() decoded_values = le.inverse_transform(encoded_values) initials = {} for i in range(len(encoded_values)) : initials.setdefault(decoded_values[i],encoded_values[i] )
Titanic - Machine Learning from Disaster
10,598,995
def results(Xtrain, Ytrain): R=[] accuracy = 0.0 error = 1.0 best_accuracy = 0 best_error = 0 n = 11 knn = KNeighborsRegressor(n_neighbors=n) knn.fit(Xtrain, Ytrain) scores = cross_val_score(knn, Xtrain, Ytrain, cv=10) Ypred = knn.predict(Xtrain) knn_score = scores.mean() knn_rmsle = rmsle(Ytrain, Ypred) R.append('knn score = ') R.append(knn_score) R.append('knn rmsle = ') R.append(knn_rmsle) R.append(' ') if knn_score>accuracy: accuracy = knn_score best_accuracy = 'knn' if knn_rmsle<error: error = knn_rmsle best_error = 'knn' b = 0.1 reg = linear_model.Lasso(alpha = b) reg.fit(Xtrain, Ytrain) scores = cross_val_score(reg, Xtrain, Ytrain, cv=10) Ypred=reg.predict(Xtrain) lasso_score = scores.mean() lasso_rmsle = rmsle(Ytrain, Ypred) R.append('lasso score = ') R.append(lasso_score) R.append('lasso rmsle = ') R.append(lasso_rmsle) R.append(' ') if lasso_score>accuracy: accuracy = lasso_score best_accuracy = 'lasso' if lasso_rmsle<error: error = lasso_rmsle best_error = 'lasso' b = 0.1 rid = linear_model.Ridge(alpha = b) rid.fit(Xtrain, Ytrain) scores = cross_val_score(rid, Xtrain, Ytrain, cv=10) Ypred = rid.predict(Xtrain) ridge_score = scores.mean() ridge_rmsle = rmsle(Ytrain, Ypred) R.append('ridge score = ') R.append(ridge_score) R.append('ridge rmsle = ') R.append(ridge_rmsle) R.append(' ') if ridge_score>accuracy: accuracy = ridge_score best_accuracy = 'ridge' if ridge_rmsle<error: error = ridge_rmsle best_error = 'ridge' b = 0.1 lars = linear_model.LassoLars(alpha = b) lars.fit(Xtrain, Ytrain) scores = cross_val_score(lars, Xtrain, Ytrain, cv=10) lars_score = scores.mean() Ypred = lars.predict(Xtrain) lars_rmsle = rmsle(Ytrain, Ypred) R.append('lars score = ') R.append(lars_score) R.append('lars rmsle = ') R.append(lars_rmsle) R.append(' ') if lars_score>accuracy: accuracy = lars_score best_accuracy = 'lars' if lars_rmsle<error: error = lars_rmsle best_error = 'lars' d = 4 forest = RandomForestRegressor(max_depth=d, random_state=0, n_estimators=100) 
forest.fit(Xtrain, Ytrain) scores = cross_val_score(forest, Xtrain, Ytrain, cv=10) Ypred = forest.predict(Xtrain) forest_score = scores.mean() forest_rmsle = rmsle(Ytrain, Ypred) R.append('forest score = ') R.append(forest_score) R.append('forest rmsle = ') R.append(forest_rmsle) R.append(' ') if forest_score>accuracy: accuracy = forest_score best_accuracy = 'forest' if forest_rmsle<error: error = forest_rmsle best_error = 'forest' for i in R: print(i) print('best accuracy =', accuracy, 'with', best_accuracy) print('minimum log error =', error, 'with', best_error )<find_best_model_class>
train['Name'] = le.fit_transform(train['Name']) train['Sex'] = le.fit_transform(train['Sex']) embarked = {0:'S',1:'C',2:'Q'} train['Embarked'] = train['Embarked'].fillna('Q') train['Embarked'] = le.fit_transform(train['Embarked'] )
Titanic - Machine Learning from Disaster
10,598,995
knn = KNeighborsRegressor(n_neighbors=10) knn.fit(Xtrain, Ytrain) scores = cross_val_score(knn, Xtrain, Ytrain, cv=10) Ypred = knn.predict(Xtrain) scores.mean()<compute_test_metric>
train = train.drop(columns = ['PassengerId','Ticket','Cabin','Age_Group']) train.head()
Titanic - Machine Learning from Disaster
10,598,995
rmsle(Ytrain, Ypred )<find_best_model_class>
from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2
Titanic - Machine Learning from Disaster
10,598,995
reg = linear_model.Lasso(alpha = 0.1) reg.fit(Xtrain, Ytrain) scores = cross_val_score(reg, Xtrain, Ytrain, cv=10) Ypred=reg.predict(Xtrain) scores.mean()<compute_test_metric>
features = train.iloc[:,1:] target = train.iloc[:,0]
Titanic - Machine Learning from Disaster
10,598,995
rmsle(Ytrain, Ypred )<find_best_model_class>
best_features = SelectKBest(score_func = chi2,k = 8) fit = best_features.fit(features,target) dfscores = pd.DataFrame(fit.scores_) dfcolumns = pd.DataFrame(features.columns) featureScores = pd.concat([dfcolumns,dfscores],axis=1) featureScores.columns = ['Column','Score'] print(featureScores.nlargest(8,'Score'))
Titanic - Machine Learning from Disaster
10,598,995
rid = linear_model.Ridge(alpha = 0.5) rid.fit(Xtrain, Ytrain) scores = cross_val_score(rid, Xtrain, Ytrain, cv=10) Ypred = rid.predict(Xtrain) scores.mean()<compute_test_metric>
train = train.drop(columns = ['Embarked','SibSp','Parch']) train.head()
Titanic - Machine Learning from Disaster
10,598,995
rmsle(Ytrain, Ypred )<compute_train_metric>
sc = StandardScaler() features = train[['Pclass','Sex','Name','Fare']].values features = sc.fit_transform(features) target = train['Survived'].values
Titanic - Machine Learning from Disaster
10,598,995
bay = linear_model.BayesianRidge() bay.fit(Xtrain, Ytrain) scores = cross_val_score(bay, Xtrain, Ytrain, cv=10) Ypred = bay.predict(Xtrain) scores.mean()<compute_test_metric>
x_train, x_test, y_train, y_test = train_test_split(features, target, test_size = 0.25, random_state = 2 )
Titanic - Machine Learning from Disaster
10,598,995
rmsle(Ytrain, Ypred )<find_best_model_class>
def grid_search_cv(classifier,hyperparameters): GSCV = GridSearchCV(classifier,hyperparameters,cv = 10) model = GSCV.fit(x_test,y_test) print(model) print('HIGHEST ACCURACY : ','{0:.2%}'.format(model.best_score_)) print('BEST PARAMETERS : ',model.best_params_ )
Titanic - Machine Learning from Disaster
10,598,995
lars = linear_model.LassoLars(alpha = 0.1) lars.fit(Xtrain, Ytrain) scores = cross_val_score(lars, Xtrain, Ytrain, cv=10) Ypred = lars.predict(Xtrain) scores.mean()<compute_test_metric>
from sklearn.linear_model import LogisticRegression
Titanic - Machine Learning from Disaster
10,598,995
rmsle(Ytrain, Ypred )<define_variables>
classifier_lr = LogisticRegression(random_state = 0,C=10,penalty= 'l2' )
Titanic - Machine Learning from Disaster
10,598,995
california_sea=[(41.990352, -124.216535),(41.936725, -124.199048),(41.862157, -124.220161),(41.758672, -124.240793),(41.730317, -124.162807),(41.672629, -124.139878),(41.722746, -124.151351),(41.671813, -124.136762),(41.618963, -124.109252),(41.470737, -124.072740),(41.383226, -124.066948),(41.308172, -124.094492),(41.212278, -124.121904), (41.137176, -124.165918),(41.062165, -124.165618),(41.020596, -124.115740),(40.928851, -124.143028),(40.858028, -124.126245),(40.812048, -124.181163),(40.728511, -124.235831),(40.649059, -124.301387),(40.586325, -124.344954),(40.511043, -124.388365),(40.440002, -124.409806),(40.395399, -124.383960),(40.322914, -124.349643),(40.241803, -124.337706),(40.186635, -124.253402),(40.122885, -124.169203),(40.067673, -124.068499),(40.008009, -124.029231), (39.922813, -123.945453),(39.837566, -123.873007),(39.735216, -123.828474),(39.654186, -123.789622),(39.564619, -123.761930),(39.399528, -123.821626),(39.201588, -123.770073),(39.076989, -123.691566),(38.960637, -123.724138),(38.879044, -123.662811),(38.754580, -123.507611),(38.634199, -123.386034),(38.496411, -123.193367),(38.336876, -123.061865),(38.259117, -122.974368), (38.151338, -122.952917),(38.060918, -122.980669),(37.996318, -123.002792), (38.026254, -122.926130),(38.004306, -122.827828),(37.931906, -122.744687),(37.902923, -122.652017),(37.872444, -122.594173),(37.880984, -122.392446), (37.815555, -122.367515),(37.628327, -122.331577),(37.542968, -122.455670), (37.370235, -122.414093),(37.290236, -122.415691),(37.167091, -122.356855), (37.088046, -122.276348),(36.987005, -122.157357),(36.951905, -122.049790), (36.969554, -121.914753),(36.925477, -121.862435),(36.824092, -121.802024), (36.620740, -121.851334),(36.480625, -121.934216),(36.282719, -121.866908), (36.162592, -121.678018),(35.990860, -121.498031),(35.827849, -121.382193), (35.671399, -121.272296),(35.608589, -121.143265),(35.453082, -120.919491), (35.297750, -120.877400),(35.189759, -120.819107),(35.180890, 
-120.736397), (35.097645, -120.628863),(34.932680, -120.660285),(34.842040, -120.610177), (34.742216, -120.618143),(34.583391, -120.639685),(34.528043, -120.518413), (34.457687, -120.472919),(34.458791, -120.347644),(34.469789, -120.138306), (34.422313, -119.903627),(34.399196, -119.699791),(34.408922, -119.552255),(34.335795, -119.408499),(34.288024, -119.329889),(34.199208, -119.247261),(34.115993, -119.153777),(34.041474, -118.899965),(34.035682, -118.855901),(34.018486, -118.822894),(34.003602, -118.805037),(34.016106, -118.785710), (34.029683, -118.744327),(34.037409, -118.667109),(34.036912, -118.580005), (34.009365, -118.502919),(33.984242, -118.472597),(33.960222, -118.454035),(33.867022, -118.402873),(33.810913, -118.390523),(33.770287, -118.420867),(33.716625, -118.060214),(33.606537, -117.889392),(33.385674, -117.578771),(33.270497, -117.443285),(33.127431, -117.326314),(33.053581, -117.291643),(32.831417, -117.277875),(32.683026, -117.189643),(32.536805, -117.122224)]<feature_engineering>
model_evaluation(classifier_lr )
Titanic - Machine Learning from Disaster
10,598,995
train_data3 = train_data2 train_data3["rooms_per_household"] = train_data3["total_rooms"]/train_data3["households"] train_data3["bedrooms_per_room"] = train_data3["total_bedrooms"]/train_data3["total_rooms"] train_data3["population_per_household"] = train_data3["population"]/train_data3["households"] train_data3["income_per_person"] = train_data3["median_income"]/train_data3["population_per_household"] train_data3['mean_rooms'] = train_data3['total_rooms']/train_data3['households'] train_data3['rooms_per_person'] = train_data3['total_rooms']/train_data3['population'] train_data3['mean_bedrooms'] = train_data3['total_bedrooms']/train_data3['households'] train_data3['bedrooms_per_person'] = train_data3['total_bedrooms']/train_data3['households'] train_data3['persons_per_household'] = train_data3['population']/train_data3['households'] train_data3['total_income'] = train_data3['median_income']*train_data3['households']<drop_column>
from sklearn.svm import SVC
Titanic - Machine Learning from Disaster
10,598,995
Xtrain3 = train_data3 Xtrain3 = Xtrain3.drop('Id', axis=1) Xtrain3 = Xtrain3.drop('median_house_value', axis=1) results(Xtrain3, Ytrain )<train_model>
classifier_svc = SVC(kernel = 'linear',C = 0.1 )
Titanic - Machine Learning from Disaster
10,598,995
d = 8 n = 100 boost = AdaBoostRegressor(DecisionTreeRegressor(max_depth=d), n_estimators=n) boost.fit(Xtrain3, Ytrain) Ypred = boost.predict(Xtrain3) boost_rmsle = rmsle(Ytrain, Ypred) print('error =', boost_rmsle )<train_model>
hyperparameters = {'C' : [0.01,0.1,1,10,100]} grid_search_cv(classifier_svc,hyperparameters )
Titanic - Machine Learning from Disaster
10,598,995
X_train, X_validation, y_train, y_validation = train_test_split(Xtrain3, Ytrain, train_size=0.7) model=CatBoostRegressor(iterations=200, depth=6, learning_rate=0.2, loss_function='RMSE') model.fit(Xtrain3, Ytrain,eval_set=(X_validation, y_validation),plot=True) Ypred = model.predict(Xtrain3) model_rmsle = rmsle(Ytrain, Ypred) print('error =', model_rmsle )<normalization>
model_evaluation(classifier_svc )
Titanic - Machine Learning from Disaster
10,598,995
test_data2 = test_data sea_distance=[] for i,c in test_data2.iterrows() : b=np.array(( c['latitude'],c['longitude'])) D=[] for j in california_sea: D.append(np.linalg.norm(b-j)) sea_distance.append(min(D)) test_data2 = test_data2.join(pd.Series(sea_distance,name="sea_distance")) Los_Angeles =(34.155652, -118.600019) San_Francisco =(37.775, -122.4183) la_sf_distance = [] for i,c in test_data2.iterrows() : b=np.array(( c['latitude'],c['longitude'])) D=[] D.append(np.linalg.norm(b-Los_Angeles)) D.append(np.linalg.norm(b-San_Francisco)) la_sf_distance.append(min(D)) test_data2 = test_data2.join(pd.Series(la_sf_distance,name="LA_SF_distance")) test_data2["rooms_per_household"] = test_data2["total_rooms"]/test_data2["households"] test_data2["bedrooms_per_room"] = test_data2["total_bedrooms"]/test_data2["total_rooms"] test_data2["population_per_household"] = test_data2["population"]/test_data2["households"] test_data2["income_per_person"] = test_data2["median_income"]/test_data2["population_per_household"] test_data2['mean_rooms'] = test_data2['total_rooms']/test_data2['households'] test_data2['rooms_per_person'] = test_data2['total_rooms']/test_data2['population'] test_data2['mean_bedrooms'] = test_data2['total_bedrooms']/test_data2['households'] test_data2['bedrooms_per_person'] = test_data2['total_bedrooms']/test_data2['households'] test_data2['persons_per_household'] = test_data2['population']/test_data2['households'] test_data2['total_income'] = test_data2['median_income']*test_data2['households']<drop_column>
from sklearn.tree import DecisionTreeClassifier
Titanic - Machine Learning from Disaster
10,598,995
Xtest2 = test_data2.drop('Id', axis=1 )<predict_on_test>
classifier_dt = DecisionTreeClassifier(criterion = 'entropy' )
Titanic - Machine Learning from Disaster
10,598,995
forest = RandomForestRegressor(max_depth=21, random_state=0, n_estimators=1000) forest.fit(Xtrain3, Ytrain) Ypred = forest.predict(Xtrain3) forest_rmsle = rmsle(Ytrain, Ypred) print('log error =', forest_rmsle) prediction = forest.predict(Xtest2 )<save_to_csv>
model_evaluation(classifier_dt )
Titanic - Machine Learning from Disaster
10,598,995
arq = open("submission.csv", "w") arq.write("Id,median_house_value ") for i, j in zip(test_data2['Id'], prediction): arq.write(str(i)+ "," + str(j)+" ") arq.close()<import_modules>
from sklearn.ensemble import RandomForestClassifier
Titanic - Machine Learning from Disaster
10,598,995
import pandas as pd import matplotlib.pyplot as plt import numpy as np import sklearn from sklearn.neighbors import KNeighborsRegressor from sklearn import linear_model from sklearn.ensemble import RandomForestRegressor import os<load_from_csv>
classifier_rf = RandomForestClassifier(max_depth = 2,random_state = 0 )
Titanic - Machine Learning from Disaster
10,598,995
trainData = pd.read_csv(".. /input/databa/train.csv") testData = pd.read_csv(".. /input/databa/test.csv" )<compute_test_metric>
model_evaluation(classifier_rf )
Titanic - Machine Learning from Disaster
10,598,995
def calc_rmsle(Yreal, Ypred): sum=0.0 n=len(Yreal) for x in range(n): if Ypred[x]<0: sum = sum +(0 - np.log(Yreal[x]+1)) **2 else: sum = sum +(np.log(Ypred[x]+1)- np.log(Yreal[x]+1)) **2 return np.sqrt(( sum/n)) <prepare_x_and_y>
from sklearn.neighbors import KNeighborsClassifier
Titanic - Machine Learning from Disaster
10,598,995
Xtrain = trainData Xtrain = Xtrain.drop('Id', axis=1) Xtrain = Xtrain.drop('median_house_value', axis=1) Ytrain = trainData['median_house_value'] Xtest = testData Xtest = Xtest.drop('Id', axis=1 )<find_best_params>
from sklearn.neighbors import KNeighborsClassifier
Titanic - Machine Learning from Disaster
10,598,995
best_rmsle = 1 for i in range(3, 30): knn = KNeighborsRegressor(n_neighbors=i) knn.fit(Xtrain, Ytrain) Ypred = knn.predict(Xtrain) if calc_rmsle(Ytrain, Ypred)< best_rmsle: best_n = i best_rmsle = calc_rmsle(Ytrain, Ypred) print(best_n) print(best_rmsle )<find_best_params>
from sklearn.neighbors import KNeighborsClassifier
Titanic - Machine Learning from Disaster
10,598,995
best_rmsle = 1 for i in range(1, 10): lasso = linear_model.Lasso(alpha = i*0.1) lasso.fit(Xtrain, Ytrain) Ypred = lasso.predict(Xtrain) if calc_rmsle(Ytrain, Ypred)<best_rmsle: best_alpha = i*0.1 best_rmsle = calc_rmsle(Ytrain, Ypred) print(best_alpha) print(best_rmsle )<find_best_params>
classifier_knn = KNeighborsClassifier(leaf_size = 7, n_neighbors = 3,p = 1 )
Titanic - Machine Learning from Disaster
10,598,995
best_rmsle = 1 for i in range(1,30): forest = RandomForestRegressor(max_depth=i, random_state=0, n_estimators=50) forest.fit(Xtrain, Ytrain) Ypred = forest.predict(Xtrain) if calc_rmsle(Ytrain, Ypred)<best_rmsle: best_d = i best_rmsle = calc_rmsle(Ytrain, Ypred) print(best_d) print(best_rmsle )<save_to_csv>
leaf_size = list(range(1,50)) n_neighbors = list(range(1,30)) p=[1,2] hyperparameters = dict(leaf_size=leaf_size, n_neighbors=n_neighbors, p=p) grid_search_cv(classifier_knn,hyperparameters )
Titanic - Machine Learning from Disaster
10,598,995
forest = RandomForestRegressor(max_depth=26, random_state=0, n_estimators=50) forest.fit(Xtrain, Ytrain) Ytest = forest.predict(Xtest) predictions = pd.DataFrame({"Id":testData.Id, "median_house_value":Ytest}) predictions.to_csv("predictions.csv", index=False) predictions<import_modules>
model(classifier_knn )
Titanic - Machine Learning from Disaster
10,598,995
from sklearn.metrics import make_scorer import pandas as pd import math import scipy from scipy.stats import pearsonr import numpy as np import matplotlib.pyplot as plt import sklearn from sklearn import tree from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import ExtraTreesRegressor from sklearn.ensemble import BaggingRegressor from sklearn.ensemble import AdaBoostRegressor from sklearn.metrics import accuracy_score from sklearn.model_selection import cross_val_score from sklearn import linear_model<load_from_csv>
# Evaluate the tuned KNN classifier (helper defined earlier in the notebook).
model_evaluation(classifier_knn )
Titanic - Machine Learning from Disaster
10,598,995
# Load the California housing training set (path kept exactly as authored).
train = pd.read_csv(".. /input/california/train.csv")
# Hand-collected cross-validation accuracies for the five Titanic models.
models = {'MODELS':['LOGISTIC REGRESSION','SUPPORT VECTOR CLASSIFIER','DECISION TREE CLASSIFIER','RANDOM FOREST CLASSIFIER','K-NEAREST NEIGHBORS'],
          'CROSS VAL ACCURACY(%)':[78.73,79.19,81.59,78.29,81.10]}
Titanic - Machine Learning from Disaster
10,598,995
# Load the California housing training set and append one engineered feature
# per landmark: the straight-line distance in raw lat/lon degrees from each
# house to that landmark.  The original cell copy-pasted the same formula
# 25 times; a single loop over a landmark table is equivalent and DRY.
# Dict insertion order (Python 3.7+) preserves the original column order.
train = pd.read_csv(".. /input/california/train.csv")

# column name -> (latitude, longitude); comments are the original variable names
landmarks = {
    'dist_vs':  (37.84, -122.05),            # vale_silicio
    'dist_bh':  (33.933333, -118.4),         # beverly_hills
    'dist_cdm': (33.5978595, -117.8730142),  # corona_del_mar
    'dist_mt':  (34.43666, -119.63208),      # montecito
    'dist_at':  (37.458611, -122.2),         # atherton
    'dist_wd':  (37.420833, -122.259722),    # wdside
    'dist_hh':  (34.1675, -118.660833),      # hdhills
    'dist_lal': (37.368056, -122.0975),      # latos
    'dist_bvd': (37.872778, -122.464444),    # bvde
    'dist_stm': (34.021944, -118.481389),    # stm
    'dist_ros': (37.9625, -122.555),         # ross
    'dist_frm': (37.548333, -121.988611),    # fremont
    'dist_hbr': (37.560278, -122.356389),    # hbr
    'dist_npb': (33.616667, -117.8975),      # npb
    'dist_plh': (33.5901144, -117.8445537),  # plh
    'dist_blt': (33.610278, -114.596389),    # blt
    'dist_tnp': (34.138333, -116.0725),      # tnp
    'dist_avn': (35.209167, -118.828333),    # avn
    'dist_wdl': (36.416389, -119.099444),    # wdl
    'dist_plr': (36.611667, -119.526944),    # plr
    'dist_ccc': (35.125833, -117.985833),    # ccc
    'dist_fmv': (36.301111, -119.2075),      # fmv
    'dist_lds': (36.2, -119.083333),         # lds
    'dist_wsc': (35.594167, -119.340833),    # wsc
    'dist_ccn': (36.098056, -119.560278),    # ccn
}

for col, (lat, lon) in landmarks.items():
    train[col] = ((train['longitude'] - lon) ** 2 + (train['latitude'] - lat) ** 2) ** (1 / 2)

# Re-append the target so it stays the last column after the new features.
a = train['median_house_value']
train = train.drop(columns = ['median_house_value'])
train['median_house_value'] = a
train.head()
# Tabulate the per-model CV accuracies for display.
cross_val = pd.DataFrame(models)
cross_val.head()
Titanic - Machine Learning from Disaster
10,598,995
# Load the California housing test set (path kept exactly as authored).
test = pd.read_csv(".. /input/california/test.csv")
# Load the Titanic test set and preview it.
test = pd.read_csv('.. /input/titanic/test.csv')
test.head()
Titanic - Machine Learning from Disaster
10,598,995
# Load the California housing test set and append the same engineered
# landmark-distance features as the training cell (distance in raw lat/lon
# degrees).  The original cell copy-pasted the same formula 25 times; a
# loop over a landmark table is equivalent and DRY.  Dict insertion order
# (Python 3.7+) preserves the original column order.
test = pd.read_csv(".. /input/california/test.csv")

# column name -> (latitude, longitude); comments are the original variable names
landmarks = {
    'dist_vs':  (37.84, -122.05),            # vale_silicio
    'dist_bh':  (33.933333, -118.4),         # beverly_hills
    'dist_cdm': (33.5978595, -117.8730142),  # corona_del_mar
    'dist_mt':  (34.43666, -119.63208),      # montecito
    'dist_at':  (37.458611, -122.2),         # atherton
    'dist_wd':  (37.420833, -122.259722),    # wdside
    'dist_hh':  (34.1675, -118.660833),      # hdhills
    'dist_lal': (37.368056, -122.0975),      # latos
    'dist_bvd': (37.872778, -122.464444),    # bvde
    'dist_stm': (34.021944, -118.481389),    # stm
    'dist_ros': (37.9625, -122.555),         # ross
    'dist_frm': (37.548333, -121.988611),    # fremont
    'dist_hbr': (37.560278, -122.356389),    # hbr
    'dist_npb': (33.616667, -117.8975),      # npb
    'dist_plh': (33.5901144, -117.8445537),  # plh
    'dist_blt': (33.610278, -114.596389),    # blt
    'dist_tnp': (34.138333, -116.0725),      # tnp
    'dist_avn': (35.209167, -118.828333),    # avn
    'dist_wdl': (36.416389, -119.099444),    # wdl
    'dist_plr': (36.611667, -119.526944),    # plr
    'dist_ccc': (35.125833, -117.985833),    # ccc
    'dist_fmv': (36.301111, -119.2075),      # fmv
    'dist_lds': (36.2, -119.083333),         # lds
    'dist_wsc': (35.594167, -119.340833),    # wsc
    'dist_ccn': (36.098056, -119.560278),    # ccn
}

for col, (lat, lon) in landmarks.items():
    test[col] = ((test['longitude'] - lon) ** 2 + (test['latitude'] - lat) ** 2) ** (1 / 2)

test.head()
# Keep PassengerId aside for the submission, then drop the columns the
# Titanic model was not trained on.
passenger_id = test['PassengerId']
test = test.drop(columns = ['PassengerId','Age','SibSp','Parch','Ticket','Cabin','Embarked'])
test.head()
Titanic - Machine Learning from Disaster
10,598,995
def RMSLE(Y, Ypred):
    """Root-mean-squared logarithmic error between targets Y and predictions Ypred.

    Negative predictions are folded through abs() before the log, matching the
    sign-fixups applied to the prediction arrays elsewhere in this notebook.
    """
    Y = np.array(Y)
    squared_log_errors = [
        (math.log(abs(pred) + 1) - math.log(true + 1)) ** 2
        for true, pred in zip(Y, Ypred)
    ]
    return math.sqrt(sum(squared_log_errors) / len(Y))

# Wrap as an sklearn scorer so cross_val_score can use it.
scorer_rmsle = make_scorer(RMSLE)
# Count missing values per column in the test frame.
test.isnull().sum()
Titanic - Machine Learning from Disaster
10,598,995
# Slice off the Id column (position 0) and the target (last column) to form
# the feature matrices; target is taken by name.
Xtrain = train.iloc[:,1:34]
Ytrain = train.median_house_value
Xtest = test.iloc[:,1:34]
# Re-check missing values (duplicate of the previous inspection cell).
test.isnull().sum()
Titanic - Machine Learning from Disaster
10,598,995
# Baseline: ordinary least squares, scored with 10-fold CV under RMSLE.
reglin = linear_model.LinearRegression()
reglin.fit(Xtrain, Ytrain)
c_val = 10
scores = cross_val_score(reglin, Xtrain, Ytrain, cv=c_val, scoring=scorer_rmsle)
# sum() iterates the fold scores in the same left-to-right order the original
# accumulation loop did.
acuracia_esperada = sum(scores) / c_val
print(acuracia_esperada)
# Fill the single missing Fare in the Titanic test set.
# NOTE(review): 7.75 looks like a typical 3rd-class fare — confirm how this
# constant was chosen (the training cell used different imputation).
test['Fare'] = test['Fare'].fillna(7.75)
Titanic - Machine Learning from Disaster
10,598,995
# Linear regression can extrapolate to negative prices; if any prediction is
# negative, flip the whole array through abs() (same as the original loop,
# which re-applied abs on every negative element it encountered).
Ytest_pred1 = reglin.predict(Xtest)
if (Ytest_pred1 < 0).any():
    Ytest_pred1 = abs(Ytest_pred1)
# Extract name titles (helper defined earlier), then label-encode Name and Sex
# with the notebook's LabelEncoder `le`.
# NOTE(review): fit_transform here re-fits the encoder on test data — the
# label mapping may differ from the one used on the training set; verify.
get_initials(test,'Name')
test['Name'] = le.fit_transform(test['Name'])
test['Sex'] = le.fit_transform(test['Sex'])
test.head()
Titanic - Machine Learning from Disaster
10,598,995
# Assemble submission frame 1 (linear regression): Id + predicted value, cast to int.
result1 = np.vstack(( test['Id'], Ytest_pred1)).T.astype(int)
x1 = ["Id","median_house_value"]
Resultado1 = pd.DataFrame(columns = x1, data = result1)
# BUG FIX: the scaler `sc` was already fitted on the training features;
# calling fit_transform here re-fitted it on the test set (data leakage and a
# train/test scale mismatch).  Reuse the training fit instead.
test = sc.transform(test)
Titanic - Machine Learning from Disaster
10,598,995
# Ridge regression, scored with 10-fold CV under RMSLE.
ridge = linear_model.Ridge()
ridge.fit(Xtrain, Ytrain)
c_val = 10
scores = cross_val_score(ridge, Xtrain, Ytrain, cv=c_val, scoring=scorer_rmsle)
acuracia_esperada = sum(scores) / c_val
print(acuracia_esperada)
# Predict with the tuned KNN classifier and write the Titanic submission file.
prediction = classifier_knn.predict(test)
submission = pd.DataFrame({'PassengerId':passenger_id,'Survived':prediction})
submission.to_csv('TITANIC_SUBMISSION.csv',index = False)
submission
Titanic - Machine Learning from Disaster
4,252,362
# Ridge predictions; abs() guards against negative extrapolated prices.
Ytest_pred2 = ridge.predict(Xtest)
Ytest_pred2 = abs(Ytest_pred2)
# Input paths for the Titanic kernel (kept exactly as authored).
train_dir = ".. /input/train.csv"
test_dir = ".. /input/test.csv"
Titanic - Machine Learning from Disaster
4,252,362
# Assemble submission frame 2 (ridge): Id + predicted value, cast to int.
result2 = np.vstack(( test['Id'], Ytest_pred2)).T.astype(int)
x2 = ["Id","median_house_value"]
Resultado2 = pd.DataFrame(columns = x2, data = result2)
# Load the Titanic train/test CSVs into df / test_df.
df = pd.read_csv(train_dir)
test_df = pd.read_csv(test_dir)
Titanic - Machine Learning from Disaster
4,252,362
# Lasso regression (default alpha), scored with 10-fold CV under RMSLE.
lasso = linear_model.Lasso()
lasso.fit(Xtrain, Ytrain)
c_val = 10
scores = cross_val_score(lasso, Xtrain, Ytrain, cv=c_val, scoring=scorer_rmsle)
acuracia_esperada = sum(scores) / c_val
print(acuracia_esperada)
# Ticket strings are high-cardinality identifiers — drop them from both frames.
df.drop(["Ticket"], axis = 1, inplace = True)
test_df.drop(["Ticket"], axis = 1, inplace = True)
df.info()
df.head()
Titanic - Machine Learning from Disaster
4,252,362
# BUG FIX: this cell belongs to the Lasso model (fitted in the previous cell)
# but predicted with `ridge` — a copy-paste error that made Ytest_pred3 an
# exact duplicate of Ytest_pred2.  Predict with the lasso model instead.
Ytest_pred3 = lasso.predict(Xtest)
Ytest_pred3 = abs(Ytest_pred3)
# Inspect the class balance of the target: counts plus a pie chart.
print(df["Survived"].value_counts())
df["Survived"].value_counts().plot(kind = "pie")
Titanic - Machine Learning from Disaster
4,252,362
# Assemble submission frame 3 (lasso): Id + predicted value, cast to int.
result3 = np.vstack(( test['Id'], Ytest_pred3)).T.astype(int)
x3 = ["Id","median_house_value"]
Resultado3 = pd.DataFrame(columns = x3, data = result3)
# PassengerId is a row identifier, not a feature — drop it from both frames.
df.drop("PassengerId", axis = 1, inplace = True)
test_df.drop("PassengerId", axis = 1, inplace = True)
Titanic - Machine Learning from Disaster
4,252,362
# Single decision tree, scored with CV under RMSLE.
# Relies on c_val being set by an earlier cell (notebook execution order).
arvore = tree.DecisionTreeRegressor()
arvore.fit(Xtrain, Ytrain)
scores = cross_val_score(arvore, Xtrain, Ytrain, cv=c_val, scoring=scorer_rmsle)
acuracia_esperada = sum(scores) / c_val
print(acuracia_esperada)
# One-hot encode Pclass into pclass1/pclass2/pclass3 and drop the original
# ordinal column, on both frames.
pclass_dum = pd.get_dummies(df["Pclass"])
test_pclass_dum = pd.get_dummies(test_df["Pclass"])
df = pd.concat([df, pclass_dum], axis = 1)
test_df = pd.concat([test_df, test_pclass_dum], axis = 1)
df.rename({1:"pclass1", 2:"pclass2", 3:"pclass3"}, axis = 1, inplace = True)
test_df.rename({1:"pclass1", 2:"pclass2", 3:"pclass3"}, axis = 1, inplace = True)
df.drop(["Pclass"], axis = 1, inplace = True)
test_df.drop(["Pclass"], axis = 1, inplace = True)
Titanic - Machine Learning from Disaster
4,252,362
# Decision-tree predictions on the test features.
Ytest_pred4 = arvore.predict(Xtest)
# Binary-encode Sex (male=0, female=1), fill the two missing Embarked values
# with the modal port "S", then one-hot encode Embarked into three columns
# and drop the original — applied identically to both frames.
df["Sex"].replace("male", 0, inplace = True)
test_df["Sex"].replace("male", 0, inplace = True)
df["Sex"].replace("female", 1, inplace = True)
test_df["Sex"].replace("female", 1, inplace = True)
df["Embarked"].fillna("S", inplace = True)
test_df["Embarked"].fillna("S", inplace = True)
pclass_dum = pd.get_dummies(df["Embarked"])
test_pclass_dum = pd.get_dummies(test_df["Embarked"])
df = pd.concat([df, pclass_dum], axis = 1)
test_df = pd.concat([test_df, test_pclass_dum], axis = 1)
df.rename({"S":"embarked_s", "C":"embarked_c", "Q":"embarked_q"}, axis = 1, inplace = True)
test_df.rename({"S":"embarked_s", "C":"embarked_c", "Q":"embarked_q"}, axis = 1, inplace = True)
df.drop(["Embarked"], axis = 1, inplace = True)
test_df.drop(["Embarked"], axis = 1, inplace = True)
Titanic - Machine Learning from Disaster
4,252,362
# Assemble submission frame 4 (decision tree): Id + predicted value, cast to int.
result4 = np.vstack(( test['Id'], Ytest_pred4)).T.astype(int)
x4 = ["Id","median_house_value"]
Resultado4 = pd.DataFrame(columns = x4, data = result4)
def create_family_ranges(df):
    """Bucket the 'n_fam_mem' family-size counts into ordinal groups.

    1 = single, 2 = couple, 3 = small family (3-4), 4 = large family (5+).
    Values that match no bucket are skipped, exactly as the original
    append-based loop did.
    """
    buckets = []
    for members in df["n_fam_mem"]:
        if members == 1:
            buckets.append(1)
        elif members == 2:
            buckets.append(2)
        elif 2 < members <= 4:
            buckets.append(3)
        elif members > 4:
            buckets.append(4)
    return buckets

# Attach the bucketed size to both frames.
famsize = create_family_ranges(df)
df["familysize"] = famsize
test_famsize = create_family_ranges(test_df)
test_df["familysize"] = test_famsize
Titanic - Machine Learning from Disaster
4,252,362
# Bagging ensemble of 200 base regressors, scored with 10-fold CV under RMSLE.
bot = BaggingRegressor(n_estimators = 200)
bot.fit(Xtrain, Ytrain)
c_val = 10
scores = cross_val_score(bot, Xtrain, Ytrain, cv=c_val, scoring=scorer_rmsle)
acuracia_esperada = sum(scores) / c_val
print(acuracia_esperada)
# One-hot encode the family-size bucket into named indicator columns.
fsizedummies = pd.get_dummies(df["familysize"])
test_fsizedummies = pd.get_dummies(test_df["familysize"])
df = pd.concat([df, fsizedummies], axis = 1)
test_df = pd.concat([test_df, test_fsizedummies], axis = 1)
df.rename({1:"fam_single",2:"fam_small",3:"fam_medium", 4:"fam_big"}, axis = 1, inplace = True)
test_df.rename({1:"fam_single",2:"fam_small",3:"fam_medium", 4:"fam_big"}, axis = 1, inplace = True)
Titanic - Machine Learning from Disaster
4,252,362
# Bagging-ensemble predictions on the test features.
Ytest_pred5 = bot.predict(Xtest)
# Fit a linear regression that predicts Age from the other numeric features,
# trained on all rows (train + test pooled) that do have an Age.
# NOTE(review): pooling test rows into the imputation model is mild leakage —
# confirm this is acceptable for the competition setting.
reg_df = df.drop(["Survived", "Name", "Cabin"], axis = 1)
reg_df_test = test_df.drop(["Name", "Cabin"], axis = 1)
age_reg_df = reg_df[reg_df["Age"].isna() == False]
age_reg_df_test = reg_df_test[reg_df_test["Age"].isna() == False]
new_age_df = age_reg_df.append(age_reg_df_test)
new_age_X = new_age_df.drop(["Age"], axis = 1)
new_age_y = new_age_df["Age"]
# The test set has one missing Fare; fill it so the regression can fit.
new_age_X["Fare"].fillna(df["Fare"].median() , inplace = True)
linear_reg_model = LinearRegression().fit(new_age_X, new_age_y)
Titanic - Machine Learning from Disaster
4,252,362
# Assemble submission frame 5 (bagging): Id + predicted value, cast to int.
result5 = np.vstack(( test['Id'], Ytest_pred5)).T.astype(int)
x5 = ["Id","median_house_value"]
Resultado5 = pd.DataFrame(columns = x5, data = result5)
def get_age_indexes_to_replace(df):
    # Collect positional indexes of rows whose Age is NaN.
    # (NaN is detected by string-comparing str(x) == "nan".)
    age_temp_list = df["Age"].values.tolist()
    indexes_age_replace = []
    age_temp_list = [str(x)for x in age_temp_list]
    for i, item in enumerate(age_temp_list):
        if item == "nan":
            indexes_age_replace.append(i)
    return indexes_age_replace

indexes_to_replace_main = get_age_indexes_to_replace(df)
indexes_to_replace_test = get_age_indexes_to_replace(test_df)

def linear_age_predictions(reg_df, indexes_age_replace):
    # Predict an age for each NaN row using the fitted linear_reg_model.
    # Each prediction is a 1-element array (output of predict on one row).
    reg_df_temp = reg_df.drop(["Age"], axis = 1)
    age_predictions = []
    for i in indexes_age_replace:
        x = reg_df_temp.iloc[i]
        x = np.array(x ).reshape(1,-1)
        pred = linear_reg_model.predict(x)
        age_predictions.append(pred)
    return age_predictions

age_predictions_main = linear_age_predictions(reg_df, indexes_to_replace_main)
age_predictions_test = linear_age_predictions(reg_df_test, indexes_to_replace_test)

def fill_age_nan(df, indexes_age_replace, age_predictions):
    # Write the predicted ages back into the Age column, positionally.
    # NOTE(review): chained indexing df["Age"][item] raises
    # SettingWithCopyWarning and may not always write through — verify.
    for i, item in enumerate(indexes_age_replace):
        df["Age"][item] = age_predictions[i]
    return df

df = fill_age_nan(df, indexes_to_replace_main, age_predictions_main)
df_test = fill_age_nan(test_df, indexes_to_replace_test, age_predictions_test)
Titanic - Machine Learning from Disaster
4,252,362
# Random forest (200 trees, log2 feature subsampling), 10-fold CV under RMSLE.
rdf = RandomForestRegressor(n_estimators = 200, max_features = 'log2', min_samples_leaf = 1)
rdf.fit(Xtrain, Ytrain)
c_val = 10
scores = cross_val_score(rdf, Xtrain, Ytrain, cv=c_val, scoring=scorer_rmsle)
acuracia_esperada = sum(scores) / c_val
print(acuracia_esperada)
def age_to_int(df):
    """Bin df['Age'] into ordinal age groups and return them as a DataFrame.

    0 = child (<14), 1 = youth (14-24), 2 = adult (25-59), 3 = senior (60+).
    Values matching no bin (e.g. NaN) pass through unchanged, mirroring the
    original elif chain, which left such entries untouched.
    """
    def bin_age(age):
        if age < 14:
            return 0
        if 14 <= age < 25:
            return 1
        if 25 <= age < 60:
            return 2
        if age >= 60:
            return 3
        return age  # unmatched (NaN) — keep the original value

    return pd.DataFrame([bin_age(a) for a in df["Age"]])
Titanic - Machine Learning from Disaster
4,252,362
# Random-forest predictions on the test features.
Ytest_pred6 = rdf.predict(Xtest)
# Replace the continuous Age column with its ordinal bin on both frames.
ageint = age_to_int(df)
df["Ageint"] = ageint
df.drop("Age", axis = 1, inplace = True)
test_ageint = age_to_int(test_df)
test_df["Ageint"] = test_ageint
test_df.drop("Age", axis = 1, inplace = True)
Titanic - Machine Learning from Disaster
4,252,362
# Assemble submission frame 6 (random forest) and write it to CSV.
result6 = np.vstack(( test['Id'], Ytest_pred6)).T.astype(int)
x6 = ["Id","median_house_value"]
Resultado = pd.DataFrame(columns = x6, data = result6)
Resultado.to_csv("resultados_rdf.csv", index = False)
def conv_fare_ranges(df):
    """Bucket df['actual_fare'] into five ordinal fare bands.

    0 = <7, 1 = 7-13.99, 2 = 14-29.99, 3 = 30-49.99, 4 = >=50.
    Values matching no band (e.g. NaN) are skipped, exactly as the original
    append-based elif chain behaved.
    """
    bands = []
    for fare in df["actual_fare"]:
        if fare < 7:
            bands.append(0)
        elif fare < 14:
            bands.append(1)
        elif fare < 30:
            bands.append(2)
        elif fare < 50:
            bands.append(3)
        elif fare >= 50:
            bands.append(4)
    return bands

# Attach the fare band to both frames.
fare_ranges = conv_fare_ranges(df)
df["fare_ranges"] = fare_ranges
test_fare_ranges = conv_fare_ranges(test_df)
test_df["fare_ranges"] = test_fare_ranges
Titanic - Machine Learning from Disaster