kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
2,684,355
df_train['sat_id'].value_counts()<compute_test_metric>
knn = KNeighborsClassifier(n_neighbors = 3) knn.fit(X_train, y_train) Y_pred = knn.predict(X_test) acc_knn = round(knn.score(X_train, y_train)* 100, 3) acc_knn
Titanic - Machine Learning from Disaster
2,684,355
def mean_absolute_percentage_error(y_true, y_pred): return np.mean(np.abs(( y_true - y_pred)/ y_true)) * 100<compute_train_metric>
gaussian = GaussianNB() gaussian.fit(X_train, y_train) Y_pred = gaussian.predict(X_test) acc_gaussian = round(gaussian.score(X_train, y_train)* 100, 3) acc_gaussian
Titanic - Machine Learning from Disaster
2,684,355
def predict_with_season_and_trend(train,test,period = 24): count = test.shape[0]//period res = test.shape[0]%period os1 = train.iloc[-period:].error.values.tolist() *count if res != 0: os1 += train.iloc[-period:-(period-res)].error.values.tolist() seas = np.array(os1) Mm = 48 mm = 25 trend = train.error[-Mm:].rolling(window=24 ).mean() s,*_ = linregress(np.arange(mm),trend.dropna().values) slope = s*np.arange(0,test.shape[0]) seas+=slope return seas<predict_on_test>
perceptron = Perceptron(max_iter=1000, tol=0.001) perceptron.fit(X_train, y_train) Y_pred = perceptron.predict(X_test) acc_perceptron = round(perceptron.score(X_train, y_train)* 100, 3) acc_perceptron
Titanic - Machine Learning from Disaster
2,684,355
@illustration def predictions(indices): dff = df[df.sat_id == indices] df_traning = dff[dff.type == 'train'] df_testing = dff[dff.type == 'test'] pred = predict_with_season_and_trend(df_traning,df_testing,period = 24) a = [{'id' : df_testing["id"].values[i], 'error_p' : pred[i]} for i in range(df_testing.shape[0])] return a<predict_on_test>
linear_svc = LinearSVC(max_iter=10000) linear_svc.fit(X_train, y_train) Y_pred = linear_svc.predict(X_test) acc_linear_svc = round(linear_svc.score(X_train, y_train)* 100, 3) acc_linear_svc
Titanic - Machine Learning from Disaster
2,684,355
@illustration def test_pred(indices): t_df = df_train[df_train.sat_id == indices] sh = np.int64(t_df.shape[0]*0.7) df_traning = t_df.iloc[:sh] df_testing = t_df.iloc[sh:] pred = predict_with_season_and_trend(df_traning,df_testing,period = 24) df_testing['pred_error'] = pred return mean_absolute_percentage_error(df_testing.error,df_testing.pred_error )<categorify>
sgd = SGDClassifier(max_iter=10000, tol=0.001) sgd.fit(X_train, y_train) Y_pred = sgd.predict(X_test) acc_sgd = round(sgd.score(X_train, y_train)* 100, 3) acc_sgd
Titanic - Machine Learning from Disaster
2,684,355
with ThreadPool(20)as pool: fold = pool.map(predictions,range(600))<create_dataframe>
decision_tree = DecisionTreeClassifier() decision_tree.fit(X_train, y_train) Y_pred = decision_tree.predict(X_test) acc_decision_tree = round(decision_tree.score(X_train, y_train)* 100, 3) acc_decision_tree
Titanic - Machine Learning from Disaster
2,684,355
fold = [x[i] for x in fold for i in range(len(x)) ] kar = pd.DataFrame(fold) kar<merge>
random_forest = RandomForestClassifier(n_estimators=100) random_forest.fit(X_train, y_train) Y_pred = random_forest.predict(X_test) random_forest.score(X_train, y_train) acc_random_forest = round(random_forest.score(X_train, y_train)* 100, 3) acc_random_forest
Titanic - Machine Learning from Disaster
2,684,355
predictions = pd.merge(df[df.type == 'test'],kar,how = 'inner',on = 'id') predictions.error = predictions.error_p predictions.info()<load_from_csv>
MLP_clf = MLPClassifier(activation='tanh', solver='lbfgs', alpha=1e-7, hidden_layer_sizes=(50,50), random_state=1, warm_start=True) MLP_clf.fit(X_train, y_train) Y_pred = MLP_clf.predict(X_test) acc_MLP_clf = round(MLP_clf.score(X_train, y_train)* 100, 3) acc_MLP_clf
Titanic - Machine Learning from Disaster
2,684,355
new_df = pd.read_csv(data_root+'sub.csv',sep = ',') our_preds = pd.merge(predictions,new_df,how = 'inner',on = 'id') our_preds = our_preds[['id','error_x']] our_preds.rename(columns={'error_x': 'error'}, inplace=True) our_preds<save_to_csv>
adaboost = AdaBoostClassifier() adaboost.fit(X_train, y_train) Y_pred = adaboost.predict(X_test) acc_adaboost = round(adaboost.score(X_train, y_train)* 100, 3) acc_adaboost
Titanic - Machine Learning from Disaster
2,684,355
our_preds.to_csv('/kaggle/output',sep=",",index = False )<save_to_csv>
gboost = GradientBoostingClassifier() gboost.fit(X_train, y_train) Y_pred = gboost.predict(X_test) acc_gboost = round(gboost.score(X_train, y_train)* 100, 3) acc_gboost
Titanic - Machine Learning from Disaster
2,684,355
our_preds.to_csv('/kaggle/working/pred.csv', index = False )<set_options>
models = pd.DataFrame({ 'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression', 'Random Forest', 'Naive Bayes', 'Perceptron', 'Stochastic Gradient Decent', 'Linear SVC', 'Decision Tree', 'MLPClassifier', 'AdaBoostClassifier', 'GradientBoostingClassifier'], 'Score': [acc_svc, acc_knn, acc_log, acc_random_forest, acc_gaussian, acc_perceptron, acc_sgd, acc_linear_svc, acc_decision_tree, acc_MLP_clf, acc_adaboost, acc_gboost]}) models.sort_values(by='Score', ascending=False )
Titanic - Machine Learning from Disaster
2,684,355
cf.set_config_file(offline=True) for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) <load_from_csv>
Y_pred = random_forest.predict(X_test )
Titanic - Machine Learning from Disaster
2,684,355
root = Path('.. /input/great-energy-predictor-shootout-i') df_submit = pd.read_csv(root/'sample_submission.csv') df_submit<data_type_conversions>
submission = pd.DataFrame({ "PassengerId": df_test["PassengerId"], "Survived": Y_pred }) submission.to_csv('submission.csv', index=False )
Titanic - Machine Learning from Disaster
2,614,604
df_test_A = pd.read_csv(root/'Atest.dat',delim_whitespace=True) df_test_A['Datetime'] = pd.to_datetime(( df_test_A['YEAR']+1900 ).astype('str')\ +'-'+(df_test_A['MONTH'] ).astype('str')\ +'-'+(df_test_A['DAY'] ).astype('str')\ +' '+(df_test_A['HOUR']/100 ).astype('int' ).astype('str')\ +':00') df_test_A.set_index('Datetime', inplace=True) df_test_A = df_test_A.drop(['MONTH','DAY','YEAR','HOUR'],axis=1) df_test_A['hourOfDay'] = df_test_A.index.hour df_test_A['dayOfWeek'] = df_test_A.index.dayofweek<load_from_csv>
train=pd.read_csv(".. /input/train.csv") test=pd.read_csv(".. /input/test.csv") print("Train dataset has {} samples and {} attributes".format(*train.shape)) print("Test dataset has {} samples and {} attributes".format(*test.shape))
Titanic - Machine Learning from Disaster
2,614,604
df_test_B = pd.read_csv(root/'Btest.dat',header=None,delim_whitespace=True)\ .rename(columns={0:'Date',1:'HorizRad',2:'SE_Rad',3:'S_Rad',4:'SW_Rad'}) df_test_B.set_index('Date', inplace=True) df_test_B.reset_index(inplace=True) df_test_B['Day'] = np.floor(df_test_B['Date']) df_test_B['Time'] = df_test_B['Date'] - df_test_B['Day'] df_test_B.drop('Date', axis=1, inplace=True )<data_type_conversions>
n=len(train) surv_0=len(train[train['Survived']==0]) surv_1=len(train[train['Survived']==1]) print("% of passanger survived in train dataset: ",surv_1*100/n) print("% of passanger not survived in train dataset: ",surv_0*100/n )
Titanic - Machine Learning from Disaster
2,614,604
df_train_A = pd.read_csv(root/'Atrain.dat',delim_whitespace=True) df_train_A['Datetime'] = pd.to_datetime(( df_train_A['YEAR']+1900 ).astype('str')\ +'-'+(df_train_A['MONTH'] ).astype('str')\ +'-'+(df_train_A['DAY'] ).astype('str')\ +' '+(df_train_A['HOUR']/100 ).astype('int' ).astype('str')\ +':00') df_train_A.set_index('Datetime', inplace=True) df_train_A = df_train_A.drop(['MONTH','DAY','YEAR','HOUR'],axis=1) df_train_A['hourOfDay'] = df_train_A.index.hour df_train_A['dayOfWeek'] = df_train_A.index.dayofweek df_train_A<feature_engineering>
cat=['Pclass','Sex','Embarked'] num=['Age','SibSp','Parch','Fare']
Titanic - Machine Learning from Disaster
2,614,604
df_train_B = pd.read_csv(root/'Btrain.dat',header=None,delim_whitespace=True)\ .rename(columns={0:'Date',1:'HorizRad',2:'SE_Rad',3:'S_Rad',4:'SW_Rad',5:'Beam_Rad'}) df_train_B.set_index('Date', inplace=True) df_train_B.sort_index(inplace=True) df_train_B.reset_index(inplace=True) df_train_B['Day'] = np.floor(df_train_B['Date']) df_train_B['Time'] = df_train_B['Date'] - df_train_B['Day'] df_train_B.drop('Date', axis=1, inplace=True) df_train_B<concatenate>
csq=chi2_contingency(pd.crosstab(train['Survived'], train['Sex'])) print("P-value: ",csq[1] )
Titanic - Machine Learning from Disaster
2,614,604
df_weather_merged = df_train_A[['TEMP','SOLAR']].append(df_test_A[['TEMP','SOLAR']]) df_weather_merged = df_weather_merged.sort_index() df_weather_merged<define_variables>
csq2=chi2_contingency(pd.crosstab(train['Survived'], train['Embarked'])) print("P-value: ",csq2[1] )
Titanic - Machine Learning from Disaster
2,614,604
alpha_data = {} MAPE_data = {} RSQUARED_data = {} NMBE_data = {} CVRSME_data = {} data_test_A = df_test_A.copy() data_test_B = df_test_B.copy()<merge>
csq3=chi2_contingency(pd.crosstab(train['Survived'], train['Pclass'])) print("P-value: ",csq3[1] )
Titanic - Machine Learning from Disaster
2,614,604
for name_meter in ['WBE','WBCW','WBHW']: data_train_A = df_train_A.copy() data_train_A = data_train_A.merge(df_weather_merged, left_index=True, right_index=True) feat_train_A = data_train_A.drop(['WBE','WBCW','WBHW'],axis=1 ).copy() label_train_A = data_train_A[name_meter].copy() feat_test_A = df_test_A.copy() feat_test_A = feat_test_A.merge(df_weather_merged, left_index=True, right_index=True) print('Power Meter: '+name_meter) LGB_model = LGBMRegressor() data_train_A[name_meter + '_pred'] = cross_val_predict(LGB_model, feat_train_A, label_train_A, cv=5) LGB_model.fit(feat_train_A, label_train_A) data_test_A[name_meter + '_pred'] = LGB_model.predict(feat_test_A) errors = abs(data_train_A[name_meter + '_pred'] - label_train_A) errors_mean = round(np.mean(errors), 2) MAPE = 100 * np.mean(( errors / label_train_A)) NMBE = 100 *(sum(label_train_A - data_train_A[name_meter + '_pred'])/(pd.Series(label_train_A ).count() * np.mean(label_train_A))) CVRSME = 100 *(( sum(( label_train_A - data_train_A[name_meter + '_pred'])**2)/(pd.Series(label_train_A ).count() -1)) **(0.5)) / np.mean(label_train_A) RSQUARED = r2_score(label_train_A, data_train_A[name_meter + '_pred']) print("MAPE: "+str(round(MAPE,2))) print("NMBE: "+str(round(NMBE,2))) print("CVRSME: "+str(round(CVRSME,2))) print("R SQUARED: "+str(round(RSQUARED,2))) data_train_A[[name_meter,name_meter + '_pred']].iplot(kind='scatter', filename='cufflinks/cf-simple-line') print('-----------------------------------------------------------' )<find_best_model_class>
print(train.isnull().sum() )
Titanic - Machine Learning from Disaster
2,614,604
name_meter = 'Beam_Rad' data_train_B = df_train_B.copy() feat_train_B = data_train_B.drop('Beam_Rad',axis=1 ).copy() label_train_B = data_train_B[name_meter].copy() feat_test_B = df_test_B.copy() print('Power Meter: '+name_meter) LGB_model = LGBMRegressor() data_train_B[name_meter + '_pred'] = cross_val_predict(LGB_model, feat_train_B, label_train_B, cv=5) LGB_model.fit(feat_train_B, label_train_B) data_test_B[name_meter + '_pred'] = LGB_model.predict(feat_test_B) errors = abs(data_train_B[name_meter + '_pred'] - label_train_B) errors_mean = round(np.mean(errors), 2) MAPE = 100 * np.mean(( errors / label_train_B)) NMBE = 100 *(sum(label_train_B - data_train_B[name_meter + '_pred'])/(pd.Series(label_train_B ).count() * np.mean(label_train_B))) CVRSME = 100 *(( sum(( label_train_B - data_train_B[name_meter + '_pred'])**2)/(pd.Series(label_train_B ).count() -1)) **(0.5)) / np.mean(label_train_B) RSQUARED = r2_score(label_train_B, data_train_B[name_meter + '_pred']) print("MAPE: "+str(round(MAPE,2))) print("NMBE: "+str(round(NMBE,2))) print("CVRSME: "+str(round(CVRSME,2))) print("R SQUARED: "+str(round(RSQUARED,2))) data_train_B[[name_meter,name_meter + '_pred']].iplot(kind='scatter', filename='cufflinks/cf-simple-line') print('-----------------------------------------------------------' )<feature_engineering>
print(test.isnull().sum() )
Titanic - Machine Learning from Disaster
2,614,604
df_submit.loc[df_submit['row_id'].str.endswith('WBE'), 'target'] = data_test_A[['WBE_pred']].melt() ['value'].values df_submit.loc[df_submit['row_id'].str.endswith('WBCW'), 'target'] = data_test_A[['WBCW_pred']].melt() ['value'].values df_submit.loc[df_submit['row_id'].str.endswith('WBHW'), 'target'] = data_test_A[['WBHW_pred']].melt() ['value'].values df_submit.loc[df_submit['row_id'].str.endswith('true_beam_insolation'), 'target'] = data_test_B['Beam_Rad_pred'].values df_submit<save_to_csv>
med=np.nanmedian(train['Age']) train['Age']=train['Age'].fillna(med) test['Age']=test['Age'].fillna(med )
Titanic - Machine Learning from Disaster
2,614,604
df_submit.to_csv('submission.csv', index=False )<create_dataframe>
train['Cabin'].value_counts()
Titanic - Machine Learning from Disaster
2,614,604
modelling_data_feat = np.load('.. /input/x_train.npy') modelling_data_target = np.load('.. /input/y_train.npy') submit_data_feat = np.load('.. /input/x_test.npy') df_mod_feat = pd.DataFrame(data=modelling_data_feat) df_sub_feat = pd.DataFrame(data=submit_data_feat) rs = 4319<categorify>
train['Cabin']=train['Cabin'].fillna(0) test['Cabin']=test['Cabin'].fillna(0 )
Titanic - Machine Learning from Disaster
2,614,604
df_mod_feat = df_mod_feat.drop(columns=['waterfront','lat','long']) df_sub_feat = df_sub_feat.drop(columns=['waterfront','lat','long']) df_mod_feat_cat = df_mod_feat[['zipcode']].copy() df_mod_feat_num = df_mod_feat.drop(columns=['zipcode']) df_sub_feat_cat = df_sub_feat[['zipcode']].copy() df_sub_feat_num = df_sub_feat.drop(columns=['zipcode']) cat_allfeat = pd.concat([df_mod_feat_cat, df_sub_feat_cat]) enc = OneHotEncoder() temp = enc.fit_transform(cat_allfeat) temp = pd.DataFrame(temp.todense()) df_mod_feat_cat_conv = temp.iloc[:df_mod_feat_cat.shape[0]] df_sub_feat_cat_conv = temp.iloc[df_mod_feat_cat.shape[0]:cat_allfeat.shape[0]] df_sub_feat_cat_conv = df_sub_feat_cat_conv.reset_index(drop=True) for i in range(0, df_mod_feat_num.shape[0]): df_mod_feat_num.loc[i,'date'] = int(df_mod_feat_num.loc[i,'date'][0:4]) if df_mod_feat_num.loc[i,'yr_renovated'] == 0: df_mod_feat_num.loc[i,'yr_renovated'] = df_mod_feat_num.loc[i,'yr_built'] for i in range(0, df_sub_feat_num.shape[0]): df_sub_feat_num.loc[i,'date'] = int(df_sub_feat_num.loc[i,'date'][0:4]) if df_sub_feat_num.loc[i,'yr_renovated'] == 0: df_sub_feat_num.loc[i,'yr_renovated'] = df_sub_feat_num.loc[i,'yr_built'] df_mod_feat = pd.concat([df_mod_feat_cat_conv, df_mod_feat_num], axis=1) df_sub_feat = pd.concat([df_sub_feat_cat_conv, df_sub_feat_num], axis=1) <split>
train['Embarked'].value_counts()
Titanic - Machine Learning from Disaster
2,614,604
def mean_absolute_percentage_error(y_true, y_pred): return np.mean(np.abs(( y_true - y_pred)/ y_true)) * 100 X_train, X_test, y_train, y_test = train_test_split(df_mod_feat, modelling_data_target, test_size=0.1, random_state=rs) model = LinearRegression(fit_intercept=False) selector = RFECV(estimator=model, step=1, min_features_to_select=1, cv=StratifiedKFold(n_splits=5, random_state=rs),scoring='r2') feat_res = selector.fit(X_train, y_train) print("Optimal number of features : %d" % selector.n_features_) sel_X_train = feat_res.transform(X_train) sel_X_test = feat_res.transform(X_test) y_true, y_pred = y_test, feat_res.estimator_.predict(sel_X_test) mean_absolute_percentage_error(y_true, y_pred )<save_to_csv>
train['Cabin']=train['Cabin'].fillna("S" )
Titanic - Machine Learning from Disaster
2,614,604
submission_features = feat_res.transform(df_sub_feat) test_predictions = feat_res.estimator_.predict(submission_features) submission = pd.DataFrame({'Id': range(1, test_predictions.shape[0]+1), 'Price': test_predictions}) submission = submission.reset_index(drop=True) submission.to_csv('submission.csv', index=False) submission<define_variables>
med=np.nanmedian(train['Fare']) test['Fare']=test['Fare'].fillna(med )
Titanic - Machine Learning from Disaster
2,614,604
%matplotlib inline data_dir = '.. /input/'<load_from_csv>
train['hasCabin']=train['Cabin'].apply(lambda x: 0 if x==0 else 1) test['hasCabin']=test['Cabin'].apply(lambda x: 0 if x==0 else 1 )
Titanic - Machine Learning from Disaster
2,614,604
def categories_to_indicators(in_df): new_df = in_df.copy() new_df['IsMale'] = in_df['PatientSex'].map(lambda x: 'M' in x ).astype(float) new_df['IsAP'] = in_df['ViewPosition'].map(lambda x: 'AP' in x ).astype(float) return new_df.drop(['PatientSex', 'ViewPosition'], axis=1) full_train_df = categories_to_indicators(pd.read_csv(os.path.join(data_dir, 'train_all.csv'))) full_stack = imread(os.path.join(data_dir, 'train.tif')) full_train_df['image'] = full_train_df['slice_idx'].map(lambda x: full_stack[x]) full_train_df.sample(3 )<load_from_csv>
train['FamilyMem']=train.apply(lambda x: x['SibSp']+x['Parch'], axis=1) test['FamilyMem']=test.apply(lambda x: x['SibSp']+x['Parch'], axis=1 )
Titanic - Machine Learning from Disaster
2,614,604
submission_test_df = categories_to_indicators(pd.read_csv(os.path.join(data_dir, 'test_info.csv'))) test_stack = imread(os.path.join(data_dir, 'test.tif')) submission_test_df['image'] = submission_test_df['slice_idx'].map(lambda x: full_stack[x]) submission_test_df.sample(3 )<train_model>
train['title']=train['Name'].apply(get_title) test['title']=test['Name'].apply(get_title )
Titanic - Machine Learning from Disaster
2,614,604
def fit_and_score(in_model, feature_maker, rescale=True): full_features = feature_maker(full_train_df) full_labels = full_train_df['opacity'] submission_feat = feature_maker(submission_test_df) train_feat, valid_feat, train_lab, valid_lab = train_test_split(full_features, full_labels, test_size=0.25, random_state=2018) if rescale: feature_scaler = RobustScaler() train_feat = feature_scaler.fit_transform(train_feat) valid_feat = feature_scaler.transform(valid_feat) submission_feat = feature_scaler.transform(submission_feat) in_model.fit(train_feat, train_lab) predictions = in_model.predict_proba(valid_feat)[:, 1] predicted_class = predictions>0.5 tpr, fpr, _ = roc_curve(valid_lab, predictions) auc = roc_auc_score(valid_lab, predictions) fig,(ax1, ax2)= plt.subplots(1, 2, figsize=(20, 10)) ax1.plot(tpr, fpr, 'r.-', label='Prediction(AUC:{:2.2f})'.format(auc)) ax1.plot(tpr, tpr, 'k-', label='Random Guessing') ax1.legend() ax1.set_title('ROC Curve') print(classification_report(valid_lab, predicted_class, target_names=['opacity', 'no opacity'])) sns.heatmap(confusion_matrix(valid_lab, predicted_class), annot=True, fmt='4d', ax=ax2) ax2.set_xlabel('Prediction') ax2.set_ylabel('Actual Value') ax2.set_title('Confusion Matrix({:.1%})'.format(accuracy_score(valid_lab, predicted_class))) sub_df = submission_test_df[['tile_id']].copy() sub_df['opacity'] = in_model.predict_proba(submission_feat)[:, 1] sub_df[['tile_id', 'opacity']].to_csv( 'm-{model}-f-{features}-s-{auc:2.0f}.csv'.format( model=in_model.__class__.__name__, features=feature_maker.__name__, auc=100*auc, ), index=False )<train_model>
title_lev1=list(train['title'].value_counts().reset_index() ['index']) title_lev2=list(test['title'].value_counts().reset_index() ['index'] )
Titanic - Machine Learning from Disaster
2,614,604
dum_model = DummyClassifier(strategy='stratified', random_state=2018) def justage(in_df): return in_df[['PatientAge']].values fit_and_score( dum_model, justage )<choose_model_class>
title_lev=list(set().union(title_lev1, title_lev2)) print(title_lev )
Titanic - Machine Learning from Disaster
2,614,604
knn_model = KNeighborsClassifier(1) def table_features(in_df): return in_df[['PatientAge', 'opacity_prior', 'IsMale', 'IsAP']].values fit_and_score( knn_model, table_features )<train_model>
train['title']=pd.Categorical(train['title'], categories=title_lev) test['title']=pd.Categorical(test['title'], categories=title_lev )
Titanic - Machine Learning from Disaster
2,614,604
def basic_image_features(in_df): out_df = in_df[['PatientAge', 'opacity_prior', 'IsMale', 'IsAP']].copy() out_df['Mean_Intensity'] = in_df['image'].map(np.mean) out_df['Std_Intensity'] = in_df['image'].map(np.std) return out_df.values knn_model = KNeighborsClassifier(2) fit_and_score( knn_model, basic_image_features )<train_model>
cols=['Pclass','Sex','Embarked','hasCabin','title'] fcol=['Pclass','Sex','Embarked','hasCabin','title','Age','FamilyMem','Fare']
Titanic - Machine Learning from Disaster
2,614,604
lr_model = LogisticRegression(solver='lbfgs', random_state=2018) fit_and_score( lr_model, basic_image_features )<train_model>
for c in cols: train[c]=train[c].astype('category') test[c]=test[c].astype('category' )
Titanic - Machine Learning from Disaster
2,614,604
rf_model = RandomForestClassifier(random_state=2018) fit_and_score( rf_model, basic_image_features )<train_model>
train_df=train[fcol] test_df=test[fcol]
Titanic - Machine Learning from Disaster
2,614,604
nb_model = GaussianNB() fit_and_score( nb_model, basic_image_features )<train_model>
train_df=pd.get_dummies(train_df, columns=cols, drop_first=True) test_df=pd.get_dummies(test_df, columns=cols, drop_first=True )
Titanic - Machine Learning from Disaster
2,614,604
svm_model = SVC(probability=True) fit_and_score( svm_model, basic_image_features )<train_on_grid>
y=train['Survived']
Titanic - Machine Learning from Disaster
2,614,604
svm_model = SVC(probability=True) fit_and_score( svm_model, texture_features )<prepare_x_and_y>
x_train, x_test, y_train, y_test = train_test_split(train_df, y, test_size=0.3, random_state=42 )
Titanic - Machine Learning from Disaster
2,614,604
c_model = models.Sequential() c_model.add(PTModel(include_top=False, input_shape=full_train_df['image'].iloc[0].shape+(3,), weights='imagenet')) c_model.add(layers.GlobalAvgPool2D()) def vgg_features(in_df): out_df = in_df[['PatientAge', 'opacity_prior', 'IsMale', 'IsAP']].copy() full_image_stack = np.stack(in_df['image'], 0) color_image_stack = np.stack([full_image_stack, full_image_stack, full_image_stack], -1 ).astype(float) pp_color_image_stack = preprocess_input(color_image_stack) vgg_features = c_model.predict(pp_color_image_stack) return np.concatenate([out_df.values, vgg_features], 1 )<train_model>
rfc=RandomForestClassifier(random_state=42 )
Titanic - Machine Learning from Disaster
2,614,604
rf_model = RandomForestClassifier(random_state=2018) fit_and_score( rf_model, vgg_features )<train_model>
param_grid = { 'n_estimators': [200, 500], 'max_features': ['auto', 'sqrt', 'log2'], 'max_depth' : [4,5,6,7,8], 'criterion' :['gini', 'entropy'] }
Titanic - Machine Learning from Disaster
2,614,604
xg_model = XGBClassifier() fit_and_score( xg_model, vgg_features )<split>
CV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 5) CV_rfc.fit(x_train, y_train )
Titanic - Machine Learning from Disaster
2,614,604
trn_image, vld_image, trn_label , vld_label = train_test_split(full_train_df['image'], full_train_df['opacity'], test_size=0.25, random_state=2018) trn_image = np.stack(trn_image, 0) vld_image = np.stack(vld_image, 0 )<choose_model_class>
CV_rfc.best_params_
Titanic - Machine Learning from Disaster
2,614,604
out_model = models.Sequential() out_model.add(layers.Reshape(( 64, 64, 1), input_shape=trn_image.shape[1:])) out_model.add(layers.Conv2D(16,(3, 3), padding='valid', activation='relu')) out_model.add(layers.MaxPool2D(( 2, 2))) out_model.add(layers.Conv2D(32,(3, 3), padding='valid', activation='relu')) out_model.add(layers.MaxPool2D(( 2, 2))) out_model.add(layers.Conv2D(64,(3, 3), padding='valid', activation='relu')) out_model.add(layers.MaxPool2D(( 2, 2))) out_model.add(layers.Conv2D(128,(3, 3), padding='valid', activation='relu')) out_model.add(layers.MaxPool2D(( 2, 2))) out_model.add(layers.GlobalAveragePooling2D()) out_model.add(layers.Dense(32, activation='relu')) out_model.add(layers.Dense(1, activation='sigmoid')) out_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['binary_accuracy']) out_model.summary()<train_model>
rfc1=RandomForestClassifier(random_state=42, max_features='auto', n_estimators= 200, max_depth=8, criterion='gini' )
Titanic - Machine Learning from Disaster
2,614,604
fit_results = out_model.fit(trn_image, trn_label, validation_data=(vld_image, vld_label), epochs=100) clear_output()<predict_on_test>
rfc1.fit(x_train, y_train )
Titanic - Machine Learning from Disaster
2,614,604
def custom_cnn_features(in_df): out_df = in_df[['PatientAge', 'opacity_prior', 'IsMale', 'IsAP']].copy() full_image_stack = np.stack(in_df['image'], 0) model_pred = out_model.predict(full_image_stack) return np.concatenate([out_df.values, model_pred], 1 )<train_model>
pred=rfc1.predict(x_test )
Titanic - Machine Learning from Disaster
2,614,604
rf_model = RandomForestClassifier(random_state=2018) fit_and_score( rf_model, custom_cnn_features )<train_model>
print("Accuracy for Random Forest on CV data: ",accuracy_score(y_test,pred))
Titanic - Machine Learning from Disaster
2,614,604
svm_model = SVC(probability=True) fit_and_score( svm_model, custom_cnn_features )<train_model>
op_rf=rfc1.predict(test_df )
Titanic - Machine Learning from Disaster
2,614,604
xg_model = XGBClassifier() fit_and_score( xg_model, custom_cnn_features )<load_from_csv>
op=pd.DataFrame(test['PassengerId']) op['Survived']=op_rf
Titanic - Machine Learning from Disaster
2,614,604
<set_options><EOS>
op.to_csv("Submission.csv", index=False )
Titanic - Machine Learning from Disaster
1,987,765
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv>
show_graphs = True add_interactions = False model_tuning = False feature_selection = False
Titanic - Machine Learning from Disaster
1,987,765
train_df = pd.read_csv('.. /input/train.csv' )<load_from_csv>
used_features = { 'PassengerId': 0, 'Pclass': 1, 'Name': 0, 'LastName': 0, 'Title': 0, 'Sex': 1, 'Sex-female x Pclass-1-2': 1, 'Sex-male x Pclass-3': 0, 'SibSp': 0, 'Parch': 0, 'FamilySize': 0, 'FamilySizeBin': 0, 'IsAloneF': 0, 'Age': 0, 'AgeBin': 1, 'IsChild': 0, 'IsChild x Pclass-1-2': 1, 'Cabin': 0, 'HasCabin': 0, 'CabinType': 0, 'Embarked': 0, 'Ticket': 0, 'TicketSize': 1, 'TicketSizeBin': 0, 'IsAloneT': 1, 'Fare': 0, 'FareOrig': 0, 'FareBin': 1, 'NameFareSize': 0, 'Group': 0, 'GroupSurvived': 1, 'GroupSize': 0 }
Titanic - Machine Learning from Disaster
1,987,765
test_df=pd.read_csv('.. /input/test.csv' )<concatenate>
%matplotlib inline
Titanic - Machine Learning from Disaster
1,987,765
combined_df=pd.concat([train_df[['posts']],test_df[['posts']]] )<feature_engineering>
df_train = pd.read_csv('.. /input/train.csv') df_valid = pd.read_csv('.. /input/test.csv') yt = df_train['Survived'] df_train['Data'] = 'T' df_valid['Data'] = 'V' df_full = pd.concat([df_train, df_valid], sort=False, ignore_index=True) mask_train = df_full['Data'] == 'T' mask_valid = df_full['Data'] == 'V'
Titanic - Machine Learning from Disaster
1,987,765
vectorizer = CountVectorizer(stop_words='english',analyzer = "word",tokenizer = None,preprocessor = None,max_features = 10000) combined_df=vectorizer.fit_transform(combined_df['posts'] )<prepare_x_and_y>
catgroup(df_full.loc[mask_train], 'Pclass', 'Survived' )
Titanic - Machine Learning from Disaster
1,987,765
train = combined_df[:train_df.shape[0]] test = combined_df[train_df.shape[0]:] y=train_df['type']<split>
df_full['LastName'] = df_full['Name'].str.extract('^([^,]+),', expand=False )
Titanic - Machine Learning from Disaster
1,987,765
y_mind = y.apply(lambda x: 0 if x[0] == 'I' else 1) y_energy = y.apply(lambda x: 0 if x[1] == 'S' else 1) y_nature = y.apply(lambda x: 0 if x[2] == 'F' else 1) y_tactics = y.apply(lambda x: 0 if x[3] == 'P' else 1 )<import_modules>
df_full['Title'] = df_full['Name'].str.extract('([A-Za-z]+)\.', expand=False) dict_replace = { "Capt": "Officer", "Col": "Officer", "Countess": "Noble", "Don": "Noble", "Dona": "Noble", "Dr": "Noble", "Jonkheer": "Noble", "Lady": "Noble", "Major": "Officer", "Mlle": "Miss", "Mme": "Mrs", "Ms": "Miss", "Rev": "Noble", "Sir": "Noble", } df_full['Title'] = df_full['Title'].replace(dict_replace )
Titanic - Machine Learning from Disaster
1,987,765
from sklearn.metrics import f1_score from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import cross_validate from sklearn.ensemble import GradientBoostingClassifier<choose_model_class>
catgroup(df_full.loc[mask_train], 'Sex', 'Survived' )
Titanic - Machine Learning from Disaster
1,987,765
clf = GradientBoostingClassifier(n_estimators=200,learning_rate=0.1, random_state=0) kfolds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1) scoring = {'f1': 'f1_micro'}<compute_train_metric>
df_full['Sex'] = df_full['Sex'].map({'male': 0, 'female': 1} )
Titanic - Machine Learning from Disaster
1,987,765
result = cross_validate(clf,train, y_mind, scoring=scoring, cv=kfolds, n_jobs=-1, verbose=1 )<compute_test_metric>
mask_female = df_full['Sex'] == 1 mask_class12 = df_full['Pclass'].isin([1, 2]) df_full['Sex-female x Pclass-1-2'] =(mask_female & mask_class12 ).astype(int )
Titanic - Machine Learning from Disaster
1,987,765
print('Y_mind model performance:') pprint(result) for key in result: print(key + ' : ', result[key].mean() )<compute_train_metric>
catgroup(df_full.loc[mask_train], 'Sex-female x Pclass-1-2', 'Survived' )
Titanic - Machine Learning from Disaster
1,987,765
result = cross_validate(clf,train, y_energy, scoring=scoring, cv=kfolds, n_jobs=-1, verbose=1 )<compute_test_metric>
mask_male = df_full['Sex'] == 0 mask_class3 = df_full['Pclass'] == 3 df_full['Sex-male x Pclass-3'] =(mask_male & mask_class3 ).astype(int )
Titanic - Machine Learning from Disaster
1,987,765
print('Y_energy model performance:') pprint(result) for key in result: print(key + ' : ', result[key].mean() )<compute_train_metric>
catgroup(df_full.loc[mask_train], 'Sex-male x Pclass-3', 'Survived' )
Titanic - Machine Learning from Disaster
1,987,765
result = cross_validate(clf,train, y_nature, scoring=scoring, cv=kfolds, n_jobs=-1, verbose=1 )<compute_test_metric>
df_full['FamilySize'] = df_full['SibSp'] + df_full['Parch'] + 1
Titanic - Machine Learning from Disaster
1,987,765
print('Y_nature model performance:') pprint(result) for key in result: print(key + ' : ', result[key].mean() )<compute_train_metric>
catgroup(df_full[mask_train], 'FamilySize', 'Survived' )
Titanic - Machine Learning from Disaster
1,987,765
result = cross_validate(clf,train, y_tactics, scoring=scoring, cv=kfolds, n_jobs=-1, verbose=1 )<compute_test_metric>
df_full['FamilySizeBin'] = df_full['FamilySizeBin'].map({'alone': 0, 'normal': 1, 'big': 2} )
Titanic - Machine Learning from Disaster
1,987,765
print('Y_tactics model performance:') pprint(result) for key in result: print(key + ' : ', result[key].mean() )<predict_on_test>
df_full['IsAloneF'] =(df_full['FamilySizeBin'] == 0 ).astype(int) catgroup(df_full.loc[mask_train], 'IsAloneF', 'Survived' )
Titanic - Machine Learning from Disaster
1,987,765
clf.fit(train,y_mind) X_t = test predictions_Mind = clf.predict_proba(X_t) Mind = pd.DataFrame(predictions_Mind[:,1], columns = ['Mind'] )<predict_on_test>
mask_noage = df_full['Age'].isnull() df_noage = df_full.loc[mask_noage] df_noage.groupby(['Title', 'Pclass'], as_index=False)['Name'].count()
Titanic - Machine Learning from Disaster
1,987,765
clf.fit(train,y_energy) predictions_Energy = clf.predict_proba(X_t) Energy = pd.DataFrame(predictions_Energy[:,1], columns = ['Energy'] )<predict_on_test>
df_medians = df_full.groupby('Title')['Age'].median() for idx, median in df_medians.iteritems() : mask_group = df_full['Title'] == idx df_full.loc[mask_group & mask_noage, 'Age'] = median
Titanic - Machine Learning from Disaster
1,987,765
clf.fit(train,y_tactics) predictions_Tactic = clf.predict_proba(X_t) Tactic = pd.DataFrame(predictions_Energy[:,1], columns = ['Tactic'] )<predict_on_test>
df_full['IsChild'] =(df_full['Age'] < 16 ).astype(int) catgroup(df_full.loc[mask_train], 'IsChild', 'Survived' )
Titanic - Machine Learning from Disaster
1,987,765
clf.fit(train,y_nature) predictions_Nature = clf.predict_proba(X_t) Nature = pd.DataFrame(predictions_Nature[:,1], columns = ['Nature'] )<concatenate>
mask_class12 = df_full['Pclass'].isin([1, 2]) df_full['IsChild x Pclass-1-2'] = df_full['IsChild'] * mask_class12.astype(int) catgroup(df_full.loc[mask_train], 'IsChild x Pclass-1-2', 'Survived' )
Titanic - Machine Learning from Disaster
1,987,765
submission = pd.concat([Mind,Energy, Nature,Tactic], axis=1) <prepare_output>
df_full['HasCabin'] = df_full['Cabin'].notnull().astype(int) mask_class1 =(df_full['Pclass'] == 1 ).astype(int) print("Correlation with 1st class: ", df_full['HasCabin'].corr(mask_class1)) catgroup(df_full.loc[mask_train], 'HasCabin', 'Survived' )
Titanic - Machine Learning from Disaster
1,987,765
submission['index'] = submission['index'] +1 submission.columns = ['id', 'mind', 'energy', 'nature', 'tactics'] submission.head() <save_to_csv>
mask_noembarked = df_full['Embarked'].isnull() df_full[mask_noembarked]
Titanic - Machine Learning from Disaster
1,987,765
submission.to_csv('submission_Gradient.csv', index=False )<set_options>
df_ticket = df_full.loc[mask_train].groupby('Ticket', as_index=False)['Survived', 'Name'].agg({'Survived': 'mean', 'Name': 'count'}) df_ticket = df_ticket.groupby('Name', as_index=False)['Survived'].mean() df_ticket = df_ticket.sort_values(by='Survived') df_ticket
Titanic - Machine Learning from Disaster
1,987,765
pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.width', 1000) %matplotlib inline if not sys.warnoptions: warnings.simplefilter("ignore") <load_from_csv>
df_ticket = df_full.groupby('Ticket')['Name'].count() df_full['TicketSize'] = df_full['Ticket'].map(df_ticket )
Titanic - Machine Learning from Disaster
1,987,765
train = pd.read_csv('.. /input/train.csv') test = pd.read_csv('.. /input/test.csv') test_id = test['id'] train_test_combined = pd.concat([train,test] )<feature_engineering>
df_full['TicketSizeBin'] = df_full['TicketSizeBin'].map({'alone': 0, 'normal': 1, 'big': 2} )
Titanic - Machine Learning from Disaster
1,987,765
def remove_url(string):
    """Replace every http/https URL in *string* with a single space."""
    pattern_url = r'http[s]?://(?:[A-Za-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9A-Fa-f][0-9A-Fa-f]))+'
    return re.sub(pattern_url, " ", string)


train_test_combined['posts'] = train_test_combined['posts'].apply(remove_url)
# Flag passengers whose ticket is shared with nobody else (bin 0 == 'alone').
df_full['IsAloneT'] = (df_full['TicketSizeBin'] == 0).astype(int)
catgroup(df_full.loc[mask_train], 'IsAloneT', 'Survived')
Titanic - Machine Learning from Disaster
1,987,765
def remove_numbers(string):
    """Remove every digit from *string*."""
    # '+' instead of the original '*': '[0-9]*' also matches the empty string
    # at every position, so re.sub performed a useless empty substitution per
    # character. Output is identical; the pass is just no longer wasteful.
    pattern_number = r'[0-9]+'
    return re.sub(pattern_number, "", string)


train_test_combined['posts'] = train_test_combined['posts'].apply(remove_numbers)
# Fares were recorded per ticket; divide by ticket size to get a per-person fare.
fare_scaler = 'TicketSize'
df_full['Fare'] = df_full['Fare'] / df_full[fare_scaler]
Titanic - Machine Learning from Disaster
1,987,765
# Translation table built once: every ASCII punctuation character -> space.
_PUNC_TABLE = str.maketrans({p: ' ' for p in string.punctuation})


def remove_punc(strings):
    """Replace every ASCII punctuation character in *strings* with a space."""
    # One C-level pass via str.translate instead of 32 chained str.replace
    # calls (one full string copy per punctuation character).
    return strings.translate(_PUNC_TABLE)


train_test_combined['posts'] = train_test_combined['posts'].apply(remove_punc)
# Inspect passengers recorded with a fare of zero — presumably crew or
# complimentary tickets; TODO confirm before treating them as outliers.
mask_zerofare = df_full['Fare'] == 0
df_full.loc[mask_zerofare]
Titanic - Machine Learning from Disaster
1,987,765
def remove_extra_spacing(string):
    """Collapse every run of whitespace in *string* into a single space."""
    # Raw string for the regex: '\s' in a plain literal is an invalid escape
    # sequence (DeprecationWarning today, a SyntaxError in future Python).
    return re.sub(r'\s+', ' ', string)


train_test_combined['posts'] = train_test_combined['posts'].apply(remove_extra_spacing)
# Impute the remaining missing fares with the median per-person fare.
# Reassignment instead of the original `df_full['Fare'].fillna(..., inplace=True)`:
# in-place mutation through a column selection is chained assignment and is
# not guaranteed to write back under pandas copy-on-write (pandas >= 2.x).
df_full['Fare'] = df_full['Fare'].fillna(df_full['Fare'].median())
Titanic - Machine Learning from Disaster
1,987,765
# English stop words and a WordNet lemmatiser, built once and shared by all rows.
stopwords_list = set(stopwords.words("english"))
lemmatiser = WordNetLemmatizer()


def remove_stopwords(string):
    """Lemmatise *string* word by word, dropping stop words and 15+-char tokens."""
    kept = [
        lemmatiser.lemmatize(word)
        for word in string.split()
        if (word not in stopwords_list) & (len(word) < 15)
    ]
    return " ".join(kept)


train_test_combined['posts'] = train_test_combined['posts'].apply(remove_stopwords)
# Reconstruct the original (per-ticket) fare for the later family-grouping step.
df_full['FareOrig'] = df_full['Fare'] * df_full[fare_scaler]
Titanic - Machine Learning from Disaster
1,987,765
def generate_targets(data):
    """Split the 4-letter MBTI code in data['type'] into four binary targets.

    mind=1 for Extravert (E), energy=1 for iNtuitive (N), nature=1 for
    Thinking (T), tactics=1 for Judging (J). Returns only the four columns.
    """
    df = data.copy()
    # Vectorised .str character access instead of a Python-level lambda per row.
    df['mind'] = (df['type'].str[0] == 'E').astype('int')
    df['energy'] = (df['type'].str[1] == 'N').astype('int')
    df['nature'] = (df['type'].str[2] == 'T').astype('int')
    df['tactics'] = (df['type'].str[3] == 'J').astype('int')
    return df[['mind', 'energy', 'nature', 'tactics']]


y = generate_targets(train)
y.head()
# Quartile-bin the per-person fare into four ordinal buckets (0-3).
df_full['FareBin'] = pd.qcut(df_full['Fare'], 4, labels=False ).astype(int )
Titanic - Machine Learning from Disaster
1,987,765
tfidvectorizer = TfidfVectorizer(stop_words='english',min_df=1,max_df=0.95) tfidfvectorized_X = tfidvectorizer.fit_transform(train_test_combined['posts'] )<split>
# Survival rate by family group (same surname + same original fare), bucketed
# by group size.
# NOTE: [['Survived', 'Name']] — the original single-bracket tuple selection
# on a GroupBy is deprecated and removed in pandas 2.0.
df_familygroup = df_full.loc[mask_train].groupby(['LastName', 'FareOrig'], as_index=False)[['Survived', 'Name']].agg({'Survived': 'mean', 'Name': 'count'})
df_familygroup = df_familygroup.groupby('Name', as_index=False)['Survived'].mean()
df_familygroup = df_familygroup.sort_values(by='Survived')
df_familygroup
Titanic - Machine Learning from Disaster
1,987,765
X = tfidfvectorized_X[:len(y)] X_submission = tfidfvectorized_X[len(y):] print(X.shape, y.shape, X_submission.shape) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, stratify=y, random_state=22 )<choose_model_class>
# Size of each (LastName, FareOrig) family group, attached to every member.
# groupby().transform('count') fills the column in one vectorised pass instead
# of the original per-row Python lambda indexing into a precomputed Series.
# (FareOrig is non-null here — fares were imputed earlier — so the group keys
# match the original tuple lookup exactly.)
df_full['NameFareSize'] = df_full.groupby(['LastName', 'FareOrig'])['Name'].transform('count')
Titanic - Machine Learning from Disaster
1,987,765
LR_model = LogisticRegression() results_index = ['Accuracy','Precision','Recall','F1 Score'] LR_model_results = pd.DataFrame(index = results_index, columns=list(y.columns)) LR_default_submission_dict = {'id':test['id']} print(" Logistic Regression(Default Parameters)") print('-'*len(" Logistic Regression(Default Parameters)")) for attribute in list(y.columns): LR_model.fit(X_train, y_train[attribute]) y_pred = LR_model.predict(X_test) LR_default_submission_dict[attribute] = LR_model.predict(X_submission) LR_model_results.loc['Accuracy',attribute] = accuracy_score(y_test[attribute],y_pred) LR_model_results.loc['Precision',attribute] = precision_score(y_test[attribute],y_pred) LR_model_results.loc['Recall',attribute] = recall_score(y_test[attribute],y_pred) LR_model_results.loc['F1 Score',attribute] = f1_score(y_test[attribute],y_pred) LR_model_results.loc['Log-loss',attribute] = log_loss(y_test[attribute],y_pred) LR_model_results.loc['ROC_AUC',attribute] = roc_auc_score(y_test[attribute],y_pred) print(f'Confusion Matrix({attribute}): ' + str(pd.DataFrame(confusion_matrix(y_test[attribute],y_pred,labels=[0,1])))) print(' ') print(" Logistic Regression Results(Default Parameters)") print("-"*len(" Logistic Regression Results(Default Parameters)")) print(LR_model_results) <choose_model_class>
# For every passenger, collect the PassengerIds of the other people sharing
# the same (LastName, FareOrig) pair — a proxy for "travelling family".
df_full['Group'] = ''
for _, members in df_full.groupby(['LastName', 'FareOrig']):
    for idx, _row in members.iterrows():
        companions = members.drop(idx)['PassengerId'].tolist()
        df_full.at[idx, 'Group'] = companions
Titanic - Machine Learning from Disaster
1,987,765
LR_model = LogisticRegression() grid_parameters = {"C":[0.05,0.5,1,5,10,20,25,30], "solver" : ['lbfgs', 'liblinear', 'sag', 'saga'], "class_weight": ['balanced']} kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=22) results_index = ['Accuracy','Precision','Recall','F1 Score'] LR_model_GSCV_results = pd.DataFrame(index = results_index, columns=list(y.columns)) LR_submission_dict = {'id':test['id']} print(" Logistic Regression(Grid Searched Parameters)") print("-"*len(" Logistic Regression(Grid Searched Parameters ")) print("(Best parameters per attribute)") print(" ") for attribute in list(y.columns): LR_model_GSCV = GridSearchCV(LR_model, param_grid = grid_parameters, scoring = 'f1', cv = kfold) LR_model_GSCV.fit(X_train, y_train[attribute]) print(f'{attribute}:',LR_model_GSCV.best_params_) print(" ") best_model = LR_model_GSCV.best_estimator_ y_pred = best_model.predict(X_test) LR_submission_dict[attribute] = best_model.predict(X_submission) LR_model_GSCV_results.loc['Accuracy',attribute] = accuracy_score(y_test[attribute],y_pred) LR_model_GSCV_results.loc['Precision',attribute] = precision_score(y_test[attribute],y_pred) LR_model_GSCV_results.loc['Recall',attribute] = recall_score(y_test[attribute],y_pred) LR_model_GSCV_results.loc['F1 Score',attribute] = f1_score(y_test[attribute],y_pred) LR_model_GSCV_results.loc['Log-loss',attribute] = log_loss(y_test[attribute],best_model.predict(X_test)) LR_model_GSCV_results.loc['ROC_AUC',attribute] = roc_auc_score(y_test[attribute],y_pred) print(f'Confusion Matrix({attribute}): ' + str(pd.DataFrame(confusion_matrix(y_test[attribute],y_pred,labels=[0,1])))) print(" ") print("-"*len(" Logistic Regression Results(Grid Searched Parameters)")) print(" Logistic Regression Results(Grid Searched Parameters)") print("-"*len(" Logistic Regression Results(Grid Searched Parameters)")) print(LR_model_GSCV_results) LR_submission_df = pd.DataFrame(LR_submission_dict) <choose_model_class>
# Extend each passenger's group with co-travellers identified by a shared
# ticket number, then de-duplicate by converting the id lists to sets.
for _, members in df_full.groupby('Ticket'):
    for idx, _row in members.iterrows():
        companions = members.drop(idx)['PassengerId'].tolist()
        df_full.at[idx, 'Group'].extend(companions)
df_full['Group'] = df_full['Group'].map(set)
Titanic - Machine Learning from Disaster
1,987,765
KNN_model = KNeighborsClassifier(weights='distance', n_neighbors=3) results_index = ['Accuracy','Precision','Recall','F1 Score'] KNN_model_results = pd.DataFrame(index = results_index, columns=list(y.columns)) KNN_submission_dict = {'id':test['id']} print(" K-Nearest-Neighbors Results ") print('-'*len(" K-Nearest-Neighbors Results ")) for attribute in list(y.columns): KNN_model.fit(X_train, y_train[attribute]) y_pred = KNN_model.predict(X_test) KNN_submission_dict[attribute] = KNN_model.predict(X_submission) KNN_model_results.loc['Accuracy',attribute] = accuracy_score(y_test[attribute],y_pred) KNN_model_results.loc['Precision',attribute] = precision_score(y_test[attribute],y_pred) KNN_model_results.loc['Recall',attribute] = recall_score(y_test[attribute],y_pred) KNN_model_results.loc['F1 Score',attribute] = f1_score(y_test[attribute],y_pred) KNN_model_results.loc['Log-loss',attribute] = log_loss(y_test[attribute],y_pred) KNN_model_results.loc['ROC_AUC',attribute] = roc_auc_score(y_test[attribute],y_pred) print(f'Confusion Matrix({attribute}): ' + str(pd.DataFrame(confusion_matrix(y_test[attribute],y_pred,labels=[0,1])))) print(' ') print(" K-Nearest-Neighbors Results ") print("-"*49) print(KNN_model_results )<choose_model_class>
def group_survived(group):
    """Return 1 if any known member of *group* survived, 0 if none did,
    and 0.5 when no member has a known outcome (all in the test split)."""
    in_group = df_full['PassengerId'].isin(group)
    best = df_full.loc[in_group, 'Survived'].max()
    return 0.5 if pd.isnull(best) else best


df_full['GroupSurvived'] = df_full['Group'].apply(group_survived)
Titanic - Machine Learning from Disaster
1,987,765
RFC_model = RandomForestClassifier() grid_parameters = {"n_estimators":[100,200], "class_weight": ['balanced']} kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=22) results_index = ['Accuracy','Precision','Recall','F1 Score'] RFC_model_GSCV_results = pd.DataFrame(index = results_index, columns=list(y.columns)) RFC_submission_dict = {'id':test['id']} print(" Random Forest Classifier(Grid Searched Parameters)") print("-"*len(" Random Forest Classifier(Grid Searched Parameters)")) print("(Best parameters per attribute)") print(" ") for attribute in list(y.columns): RFC_model_GSCV = GridSearchCV(RFC_model, param_grid = grid_parameters, scoring = 'neg_log_loss', cv = kfold) RFC_model_GSCV.fit(X_train, y_train[attribute]) print(f'{attribute}:',RFC_model_GSCV.best_params_) print(" ") best_model = RFC_model_GSCV.best_estimator_ y_pred = best_model.predict(X_test) RFC_submission_dict[attribute] = best_model.predict(X_submission) RFC_model_GSCV_results.loc['Accuracy',attribute] = accuracy_score(y_test[attribute],y_pred) RFC_model_GSCV_results.loc['Precision',attribute] = precision_score(y_test[attribute],y_pred) RFC_model_GSCV_results.loc['Recall',attribute] = recall_score(y_test[attribute],y_pred) RFC_model_GSCV_results.loc['F1 Score',attribute] = f1_score(y_test[attribute],y_pred) RFC_model_GSCV_results.loc['Log-loss',attribute] = log_loss(y_test[attribute],y_pred) RFC_model_GSCV_results.loc['ROC_AUC',attribute] = roc_auc_score(y_test[attribute],y_pred) print(f'Confusion Matrix({attribute}): ' + str(pd.DataFrame(confusion_matrix(y_test[attribute],y_pred,labels=[0,1])))) print(" ") print("-"*len(" Random Forest Classifier Results(Grid Searched Parameters)")) print(" Random Forest Classifier Results(Grid Searched Parameters)") print("-"*len(" Random Forest Classifier Results(Grid Searched Parameters)")) print(RFC_model_GSCV_results )<save_to_csv>
# Group size = the passenger plus everyone linked via name/fare or ticket.
df_full['GroupSize'] = df_full['Group'].str.len() + 1
Titanic - Machine Learning from Disaster
1,987,765
LR_submission_df = pd.DataFrame(LR_submission_dict) LR_submission_df.to_csv("LR_submission_df.csv",index=False )<set_options>
# Mean survival rate per group size on the training rows, sorted ascending.
df_fullgroup = df_full.loc[mask_train].groupby('GroupSize', as_index=False)['Survived'].mean() df_fullgroup = df_fullgroup.sort_values(by='Survived') df_fullgroup
Titanic - Machine Learning from Disaster
1,987,765
%matplotlib inline class color: BOLD = '\033[1m' UNDERLINE = '\033[4m' END = '\033[0m' warnings.filterwarnings("ignore" )<load_from_csv>
list_drop_features = [name for name, include in used_features.items() if not include] df_full.drop(columns=list_drop_features, inplace=True )
Titanic - Machine Learning from Disaster
1,987,765
df_train = pd.read_csv('.. /input/train.csv') df_train.head()<load_from_csv>
# Standardise the feature columns. The scaler is fitted on the training rows
# only and merely applied to the validation rows, so no statistics leak from
# the validation split. ('base_columns' / 'data_columns' are reused later.)
base_columns = ['Survived', 'Data']
data_columns = [c for c in df_full.columns if c not in base_columns]
scaler = StandardScaler()
df_full.loc[mask_train, data_columns] = scaler.fit_transform(df_full.loc[mask_train, data_columns])
df_full.loc[mask_valid, data_columns] = scaler.transform(df_full.loc[mask_valid, data_columns])
Titanic - Machine Learning from Disaster
1,987,765
test_df = pd.read_csv('.. /input/test.csv') test_df.head()<train_model>
# Feature matrices: drop the target and the split-marker columns.
Xt = df_full.loc[mask_train].drop(columns=base_columns) Xv = df_full.loc[mask_valid].drop(columns=base_columns )
Titanic - Machine Learning from Disaster
1,987,765
print('Training set consist of', round(len(df_train)/8675*100),'% data') print('Testing set consist of', round(len(test_df)/8675*100),'% data' )<string_transform>
# Candidate classifiers keyed by display name. The decision-tree and XGBoost
# hyper-parameters come from earlier grid searches.
models = {}
models['Logistic Regression'] = LogisticRegression(penalty='l2', C=1.0, random_state=0, solver='saga', max_iter=300)
models['SVC_rbf'] = SVC(probability=True, kernel='rbf', gamma='scale', random_state=0)
models['SVC_lin'] = SVC(probability=True, kernel='linear', random_state=0)
models['KNN'] = KNeighborsClassifier(algorithm='auto', leaf_size=20, metric='minkowski', metric_params=None, n_jobs=1, n_neighbors=10, p=3, weights='uniform')
# Best decision-tree parameters from a previous GridSearchCV run.
best_tree_params = {
    'criterion': 'entropy',
    'max_depth': 5,
    'max_features': None,
    'min_impurity_decrease': 0.0,
    'min_samples_leaf': 0.01,
    'min_samples_split': 0.01,
    'min_weight_fraction_leaf': 0.0,
    'splitter': 'best',
}
models['Decision Tree'] = DecisionTreeClassifier(**best_tree_params)
models['Random Forest'] = RandomForestClassifier(criterion='entropy', n_estimators=200, oob_score=True)
# Best XGBoost parameters from a previous randomised/grid search.
best_xgb_params = {
    'subsample': 0.5,
    'reg_lambda': 5,
    'reg_alpha': 0,
    'n_estimators': 200,
    'min_child_weight': 0,
    'max_depth': 6,
    'max_delta_step': 1,
    'learning_rate': 1.0,
    'gamma': 2,
    'colsample_bytree': 0.5,
    'colsample_bylevel': 0.5,
}
models['XGBoost'] = XGBClassifier(objective='binary:logistic', **best_xgb_params)
Titanic - Machine Learning from Disaster
1,987,765
len(df_train.iloc[1,1].split('|||'))<count_values>
# 10-repeat stratified shuffle-split cross-validation accuracy for every
# candidate model. ('cv' and 'scoring' are reused by the tuning cell below.)
scoring = ['accuracy']
cv = StratifiedShuffleSplit(n_splits=10, test_size=0.3, random_state=0)
for name, estimator in models.items():
    scores = cross_validate(estimator, Xt, yt, scoring=scoring, cv=cv, return_train_score=False)
    accs = scores['test_accuracy']
    print("CV results for model {}: mean {:2.4f}, std {:2.4f}".format(name, np.mean(accs), np.std(accs)))
Titanic - Machine Learning from Disaster
1,987,765
train_val_count=df_train['type'].value_counts() train_val_count<count_values>
if model_tuning: param_grid_XGBoost = { 'colsample_bytree': [0.1, 0.5, 1], 'colsample_bylevel': [0.1, 0.5, 1], 'subsample': [0.1, 0.5, 1], 'learning_rate': [0.05, 0.1, 0.3, 1.0], 'max_depth': [0, 3, 6, 10], 'reg_alpha': [0, 0.1, 1, 5], 'reg_lambda': [0, 0.1, 1, 5], 'gamma': [0, 1, 2, 5], 'n_estimators': [100, 200, 300, 500], 'min_child_weight': [0, 1, 2, 5], 'max_delta_step': [0, 1, 2, 5], } param_grid_DTC = { 'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [None, 3, 5, 7, 10], 'min_samples_split': [2, 0.01, 0.05, 0.1], 'min_samples_leaf': [1, 0.01, 0.05, 0.1], 'min_weight_fraction_leaf': [0.0, 0.1, 0.2], 'max_features': [None, 'auto'], 'min_impurity_decrease': [0.0, 0.2, 0.4, 0.7], } tune_model = GridSearchCV(models['Decision Tree'], param_grid=param_grid_DTC, scoring='roc_auc', cv=cv) tune_model.fit(Xt, yt) print('Best parameters: ', tune_model.best_params_ )
Titanic - Machine Learning from Disaster
1,987,765
<define_variables><EOS>
# Fit each model on the full training set and write one submission file per
# model for the validation passengers.
# Removed dead code from the original: an unused `list_submit` alias of
# models.keys(), a `model.predict(Xt)` whose result was discarded, and a
# training-accuracy score that was computed but never used.
dict_submissions = {}
for mname, model in models.items():
    model.fit(Xt, yt)
    yvp = model.predict(Xv)
    dict_submissions[mname] = yvp
    submission = pd.DataFrame({"PassengerId": df_valid["PassengerId"], "Survived": yvp})
    submission.to_csv('submission_{}.csv'.format(mname), index=False)
Titanic - Machine Learning from Disaster
1,981,782
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<create_dataframe>
warnings.filterwarnings('ignore') print('Python: {}'.format(sys.version)) print('numpy: {}'.format(np.__version__)) print('pandas: {}'.format(pd.__version__)) print('scipy: {}'.format(sp.__version__)) print('matplotlib: {}'.format(matplotlib.__version__)) print('sklearn: {}'.format(sklearn.__version__)) print('-'*30) print(check_output(["ls", ".. /input"] ).decode("utf8"))
Titanic - Machine Learning from Disaster
1,981,782
temp = {'Introverts':[77], 'Extroverts':[23], 'Intuition':[86], 'Sensing':[14], 'Thinking':[54], 'Feeling':[46], 'Judging':[60], 'Perceiving':[40]} results = pd.DataFrame.from_dict(temp, orient='index', columns=['Percentages']) results<feature_engineering>
train = pd.read_csv('.. /input/train.csv') test = pd.read_csv('.. /input/test.csv') datasets = [train, test]
Titanic - Machine Learning from Disaster
1,981,782
my_func= lambda x: float(x) results['Percentages']=results['Percentages'].apply(my_func )<prepare_x_and_y>
print('Null training values: ', train.isnull().sum()) print("-"*30) print('Test/Validation columns with null values: ', test.isnull().sum() )
Titanic - Machine Learning from Disaster
1,981,782
p = df_train.copy() z = test_df.copy()<feature_engineering>
# Feature engineering applied identically to the train and test frames.
for d in datasets:
    d['FamilySize'] = d['SibSp'] + d['Parch'] + 1
    d['IsAlone'] = 1
    # .loc on the frame itself — the original `d['IsAlone'].loc[...] = 0` is
    # chained assignment (SettingWithCopyWarning) and is not guaranteed to
    # write back to `d` under pandas copy-on-write.
    d.loc[d['FamilySize'] > 1, 'IsAlone'] = 0
    # Honorific extracted from the name; rare titles collapsed into 'Other'.
    # (raw string so '\.' is a literal regex escape, not a string escape)
    d['Title'] = d['Name'].str.extract(r'([A-Za-z]+)\.', expand=False)
    d['Title'] = d['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Other')
    d['Title'] = d['Title'].replace('Mlle', 'Miss')
    d['Title'] = d['Title'].replace('Ms', 'Miss')
    d['Title'] = d['Title'].replace('Mme', 'Mrs')
    d['FareBand'] = pd.qcut(d['Fare'], 4)
    # NOTE(review): astype(int) raises on NaN ages — presumably Age was
    # imputed in an earlier (unseen) cell; confirm.
    d['AgeBand'] = pd.cut(d['Age'].astype(int), 5)
train.info()
test.info()
train.head()
Titanic - Machine Learning from Disaster