Dataset schema (column · dtype · min – max):
  kernel_id       int64           24.2k – 23.3M
  prompt          stringlengths   8 – 1.85M
  completetion    stringlengths   1 – 182k
  comp_name       stringlengths   5 – 57
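Before the rows begin, here is a minimal sketch of how one record is shaped, using only the four columns listed in the schema above; the `KernelRow` type name and the example values are illustrative assumptions by the editor, not part of the dump itself (the `completetion` field name is spelled exactly as in the schema).

# Editor's sketch: one record of this dump as a typed Python structure.
from typing import TypedDict

class KernelRow(TypedDict):
    kernel_id: int      # numeric kernel identifier, e.g. 1316433
    prompt: str         # preceding notebook context, ending in a cell tag such as <feature_engineering>
    completetion: str   # source code of the target cell
    comp_name: str      # competition title, e.g. "Titanic - Machine Learning from Disaster"

example_row: KernelRow = {
    "kernel_id": 1_316_433,
    "prompt": "full_data = [train, test]<feature_engineering>",
    "completetion": "for dataset in full_data: dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1",
    "comp_name": "Titanic - Machine Learning from Disaster",
}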
1,316,433
test["matchType_2"] = "-" test.loc[(test.matchType == "solo-fpp")| (test.matchType == "duo-fpp")| (test.matchType == "squad-fpp")| (test.matchType == "normal-solo-fpp")| (test.matchType == "normal-duo-fpp")| (test.matchType == "normal-squad-fpp")| (test.matchType == "crashfpp")| (test.matchType == "flarefpp"), "matchType_2"] = "fpp" test.loc[(test.matchType == "solo")| (test.matchType == "duo")| (test.matchType == "squad")| (test.matchType == "normal-solo")| (test.matchType == "normal-duo")| (test.matchType == "normal-squad")| (test.matchType == "crashtpp")| (test.matchType == "flaretpp"), "matchType_2"] = "tpp"<feature_engineering>
print(train[['Sex', 'Survived']].groupby(['Sex'], as_index=False ).mean() )
Titanic - Machine Learning from Disaster
1,316,433
test["solo"] = 0 test["duo"] = 0 test["squad"] = 0 test["etc"] = 0 test.loc[test.matchType_1 == "solo", "solo"] = 1 test.loc[test.matchType_1 == "duo", "duo"] = 1 test.loc[test.matchType_1 == "squad", "squad"] = 1 test.loc[test.matchType_1 == "etc", "etc"] = 1<feature_engineering>
print(train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False ).count() )
Titanic - Machine Learning from Disaster
1,316,433
test["fpp"] = 0 test["tpp"] = 0 test.loc[test.matchType_2 == "fpp", "fpp"] = 1 test.loc[test.matchType_2 == "tpp", "tpp"] = 1<feature_engineering>
full_data = [train, test]
Titanic - Machine Learning from Disaster
1,316,433
<feature_engineering>
for dataset in full_data: dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1 print(train[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False ).mean() )
Titanic - Machine Learning from Disaster
1,316,433
<categorify>
for dataset in full_data: dataset['IsAlone'] = 0 dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
Titanic - Machine Learning from Disaster
1,316,433
<create_dataframe>
print(train[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False ).mean() )
Titanic - Machine Learning from Disaster
1,316,433
<feature_engineering>
for dataset in full_data: dataset['Embarked'] = dataset['Embarked'].fillna('S') print(train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False ).mean() )
Titanic - Machine Learning from Disaster
1,316,433
<rename_columns>
for dataset in full_data: avg_age = round(dataset['Age'].mean(), 2) dataset.loc[np.isnan(dataset['Age']), 'Age'] = avg_age train['CategoricalAge'] = pd.cut(train['Age'], 5) print(train[['CategoricalAge', 'Survived']].groupby(['CategoricalAge'], as_index=False).mean())
Titanic - Machine Learning from Disaster
1,316,433
test.set_index("Id", inplace=True) test.index.name = "Id"<prepare_x_and_y>
for dataset in full_data: dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median()) train['CategoricalFare'] = pd.qcut(train['Fare'], 4) print(train[['CategoricalFare', 'Survived']].groupby(['CategoricalFare'], as_index=False ).mean() )
Titanic - Machine Learning from Disaster
1,316,433
temp_1 = test.loc[:, feature_1] temp_2 = test.loc[:, feature_2]<feature_engineering>
def get_title(name): title_search = name.split(",")[1].split(".")[0].strip() if title_search: return title_search else: return "" for dataset in full_data: dataset['Title'] = dataset['Name'].apply(get_title )
Titanic - Machine Learning from Disaster
1,316,433
temp_1.groupby("matchId" ).transform(minmax) for i in temp_2.columns[:4]: temp_2[i] =(temp_2[i] - min(temp_2[i])) /(max(temp_2[i])- min(temp_2[i]))<merge>
for dataset in full_data: dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') dataset['Title'] = dataset['Title'].replace('Ms', 'Miss') dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') print(train[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean() )
Titanic - Machine Learning from Disaster
1,316,433
X = pd.merge(temp_1, temp_2, on="Id") X = pd.merge(X, test.loc[:, ["matchType_1", "winPlacePerc"]], on="Id" )<feature_engineering>
for dataset in full_data: dataset['Sex'] = dataset['Sex'].map({'female': 0, 'male': 1} ).astype(int) title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5} dataset['Title'] = dataset['Title'].map(title_mapping) dataset['Title'] = dataset['Title'].fillna(0) dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int) dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0 dataset.loc[(dataset['Fare'] > 7.91)&(dataset['Fare'] <= 14.454), 'Fare'] = 1 dataset.loc[(dataset['Fare'] > 14.454)&(dataset['Fare'] <= 31), 'Fare'] = 2 dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3 dataset['Fare'] = dataset['Fare'].astype(int) dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0 dataset.loc[(dataset['Age'] > 16)&(dataset['Age'] <= 32), 'Age'] = 1 dataset.loc[(dataset['Age'] > 32)&(dataset['Age'] <= 48), 'Age'] = 2 dataset.loc[(dataset['Age'] > 48)&(dataset['Age'] <= 64), 'Age'] = 3 dataset.loc[ dataset['Age'] > 64, 'Age'] = 4 drop_elements = ['Name', 'Ticket', 'Cabin', 'SibSp',\ 'Parch', 'FamilySize'] train = train.drop(drop_elements, axis = 1) train = train.drop(['CategoricalAge', 'CategoricalFare'], axis = 1) test = test.drop(drop_elements, axis = 1) print(train.head(10))
Titanic - Machine Learning from Disaster
1,316,433
<feature_engineering>
cols=['Pclass', 'Sex', 'Age', 'Fare', 'Embarked', 'IsAlone', 'Title'] X=train[cols] Y=train['Survived']
Titanic - Machine Learning from Disaster
1,316,433
<drop_column>
logreg = LogisticRegression() logreg.fit(X, Y) logreg.score(X, Y )
Titanic - Machine Learning from Disaster
1,316,433
test = X test.reset_index()<predict_on_test>
train1, test1 = train_test_split(train, test_size=0.2 )
Titanic - Machine Learning from Disaster
1,316,433
result_1 = model_1.predict(test.loc[test.matchType_1 == "solo", list_feat]) result_2 = model_2.predict(test.loc[test.matchType_1 == "duo", list_feat]) result_3 = model_3.predict(test.loc[test.matchType_1 == "squad", list_feat]) result_4 = model_4.predict(test.loc[test.matchType_1 == "etc", list_feat] )<create_dataframe>
X1=train1[cols] Y1=train1['Survived'] logreg.fit(X1, Y1) logreg.score(X1, Y1 )
Titanic - Machine Learning from Disaster
1,316,433
temp = pd.DataFrame(test.loc[test.matchType_1 == "solo", "Id"] ).append(pd.DataFrame(test.loc[test.matchType_1 == "duo", "Id"])).append(pd.DataFrame(test.loc[test.matchType_1 == "squad", "Id"])).append(pd.DataFrame(test.loc[test.matchType_1 == "etc", "Id"])) _ = pd.DataFrame(result_1, columns = ["winPlacePerc"] ).append(pd.DataFrame(result_2, columns = ["winPlacePerc"])).append(pd.DataFrame(result_3, columns = ["winPlacePerc"])).append(pd.DataFrame(result_4, columns = ["winPlacePerc"]))<concatenate>
X1_test = test1[cols] Y1_test = test1['Survived'] Y3test_pred = logreg.predict(X1_test) print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(logreg.score(X1_test, Y1_test)) )
Titanic - Machine Learning from Disaster
1,316,433
result = pd.concat([temp.reset_index(drop=True), _.reset_index(drop=True)], axis=1 )<count_missing_values>
from sklearn.ensemble import RandomForestClassifier
Titanic - Machine Learning from Disaster
1,316,433
np.sum(result.winPlacePerc.isna() )<count_values>
model = RandomForestClassifier(random_state=42 )
Titanic - Machine Learning from Disaster
1,316,433
np.sum(result.winPlacePerc < 0 )<count_values>
param_grid = { 'n_estimators': [200, 500], 'max_features': ['auto', 'sqrt', 'log2'], 'max_depth' : [4,5,6,7,8], 'criterion' :['gini', 'entropy'] }
Titanic - Machine Learning from Disaster
1,316,433
np.sum(result.winPlacePerc > 1 )<save_to_csv>
CV_rfc = GridSearchCV(estimator=model, param_grid=param_grid, cv= 5) CV_rfc.fit(X1, Y1 )
Titanic - Machine Learning from Disaster
1,316,433
result.loc[result.winPlacePerc.isna() , "winPlacePerc"] = 0 result.loc[result.winPlacePerc < 0, "winPlacePerc"] = 0 result.loc[result.winPlacePerc > 1, "winPlacePerc"] = 1 result.to_csv('submission.csv', index=False )<set_options>
CV_rfc.best_params_
Titanic - Machine Learning from Disaster
1,316,433
!pip install ultimate gc.enable()<define_variables>
rfc1=RandomForestClassifier(random_state=42, max_features='auto', n_estimators= 200, max_depth=5, criterion='gini' )
Titanic - Machine Learning from Disaster
1,316,433
INPUT_DIR = "../input/"<load_from_csv>
rfc1.fit(X1, Y1 )
Titanic - Machine Learning from Disaster
1,316,433
def feature_engineering(is_train=True): if is_train: print("processing train_V2.csv") df = pd.read_csv(INPUT_DIR + 'train_V2.csv') df = df[df['maxPlace'] > 1] else: print("processing test_V2.csv") df = pd.read_csv(INPUT_DIR + 'test_V2.csv') state('totalDistance') s = timer() df['totalDistance'] = df['rideDistance'] + df["walkDistance"] + df["swimDistance"] e = timer() state('totalDistance', False, e - s) state('rankPoints') s = timer() df['rankPoints'] = np.where(df['rankPoints'] <= 0 ,0 , df['rankPoints']) e = timer() state('rankPoints', False, e-s) target = 'winPlacePerc' features = list(df.columns) features.remove("Id") features.remove("matchId") features.remove("groupId") features.remove("matchDuration") features.remove("matchType") y = None if is_train: y = np.array(df.groupby(['matchId','groupId'])[target].agg('mean'), dtype=np.float64) features.remove(target) print("get group mean feature") agg = df.groupby(['matchId','groupId'])[features].agg('mean') agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index() if is_train: df_out = agg.reset_index() [['matchId','groupId']] else: df_out = df[['matchId','groupId']] df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId']) df_out = df_out.merge(agg_rank, suffixes=["_mean", "_mean_rank"], how='left', on=['matchId', 'groupId']) print("get group max feature") agg = df.groupby(['matchId','groupId'])[features].agg('max') agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index() df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId']) df_out = df_out.merge(agg_rank, suffixes=["_max", "_max_rank"], how='left', on=['matchId', 'groupId']) print("get group min feature") agg = df.groupby(['matchId','groupId'])[features].agg('min') agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index() df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId']) df_out = df_out.merge(agg_rank, suffixes=["_min", "_min_rank"], how='left', on=['matchId', 'groupId']) print("get group size feature") agg = df.groupby(['matchId','groupId'] ).size().reset_index(name='group_size') df_out = df_out.merge(agg, how='left', on=['matchId', 'groupId']) print("get match mean feature") agg = df.groupby(['matchId'])[features].agg('mean' ).reset_index() df_out = df_out.merge(agg, suffixes=["", "_match_mean"], how='left', on=['matchId']) print("get match size feature") agg = df.groupby(['matchId'] ).size().reset_index(name='match_size') df_out = df_out.merge(agg, how='left', on=['matchId']) df_out.drop(["matchId", "groupId"], axis=1, inplace=True) X = np.array(df_out, dtype=np.float64) del df, df_out, agg, agg_rank gc.collect() return X, y<normalization>
pred=rfc1.predict(X1_test )
Titanic - Machine Learning from Disaster
1,316,433
%%time x_train, y = feature_engineering(True) scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1), copy=False ).fit(x_train )<train_model>
print("Accuracy for Random Forest on CV data: ",accuracy_score(Y1_test,pred))
Titanic - Machine Learning from Disaster
1,316,433
print("x_train", x_train.shape, x_train.max() , x_train.min()) scaler.transform(x_train) print("x_train", x_train.shape, x_train.max() , x_train.min() )<choose_model_class>
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier, ExtraTreesClassifier from sklearn.tree import DecisionTreeClassifier from xgboost import XGBClassifier, plot_importance from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV
Titanic - Machine Learning from Disaster
1,316,433
%%time NN_model = Sequential() NN_model.add(Dense(x_train.shape[1], input_dim = x_train.shape[1], activation='relu')) NN_model.add(Dense(136, activation='relu')) NN_model.add(Dense(136, activation='relu')) NN_model.add(Dense(136, activation='relu')) NN_model.add(Dense(136, activation='relu')) NN_model.add(Dense(1, activation='linear')) NN_model.compile(loss='mean_absolute_error', optimizer='adam', metrics=['mean_absolute_error']) NN_model.summary()<train_model>
clf_rf = RandomForestClassifier() clf_et = ExtraTreesClassifier() clf_bc = BaggingClassifier() clf_ada = AdaBoostClassifier() clf_dt = DecisionTreeClassifier() clf_xg = XGBClassifier() clf_lr = LogisticRegression() clf_svm = SVC()
Titanic - Machine Learning from Disaster
1,316,433
%%time NN_model.fit(x=x_train, y=y, batch_size=1000, epochs=30, verbose=1, callbacks=callbacks_list, validation_split=0.15, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None) del x_train, y gc.collect()<normalization>
npX = np.array(X ).copy() npy = np.array(Y ).copy()
Titanic - Machine Learning from Disaster
1,316,433
x_test, _ = feature_engineering(False) scaler.transform(x_test) print("x_test", x_test.shape, x_test.max() , x_test.min()) np.clip(x_test, out=x_test, a_min=-1, a_max=1) print("x_test", x_test.shape, x_test.max() , x_test.min() )<predict_on_test>
Classifiers = ['RandomForest','ExtraTrees','Bagging','AdaBoost','DecisionTree','XGBoost','LogisticRegression','SVM'] scores = [] models = [clf_rf, clf_et, clf_bc, clf_ada, clf_dt, clf_xg, clf_lr, clf_svm] for model in models: score = cross_val_score(model, npX, npy, scoring = 'accuracy', cv = 10, n_jobs = -1 ).mean() scores.append(score )
Titanic - Machine Learning from Disaster
1,316,433
%%time pred = NN_model.predict(x_test) del x_test gc.collect()<prepare_output>
mode = pd.DataFrame(scores, index = Classifiers, columns = ['score'] ).sort_values(by = 'score', ascending = False )
Titanic - Machine Learning from Disaster
1,316,433
pred = pred.reshape(-1) pred =(pred + 1)/ 2<load_from_csv>
parameters_xg = {'max_depth':[3,6,7], 'learning_rate': [0.1,0.2], 'n_estimators': [300,200], 'min_child_weight': [4], 'reg_alpha': [6,0], 'reg_lambda': [1,8],'max_delta_step':[2], 'gamma':[0],'seed':[1]} parameters_svm = {'C':[0.9,0.01],'kernel':['rbf','linear'], 'gamma':[0,0.1,'auto'], 'probability':[True,False], 'random_state':[0,7,16],'decision_function_shape':['ovo','ovr'],'degree':[3,4,10]}
Titanic - Machine Learning from Disaster
1,316,433
df_test = pd.read_csv(INPUT_DIR + 'test_V2.csv' )<feature_engineering>
def grid(model,parameters): grid = GridSearchCV(estimator = model, param_grid = parameters, cv = 10, scoring = 'accuracy') grid.fit(npX,npy) return grid.best_score_, grid.best_estimator_.get_params()
Titanic - Machine Learning from Disaster
1,316,433
%%time print("fix winPlacePerc") for i in range(len(df_test)) : winPlacePerc = pred[i] maxPlace = int(df_test.iloc[i]['maxPlace']) if maxPlace == 0: winPlacePerc = 0.0 elif maxPlace == 1: winPlacePerc = 1.0 else: gap = 1.0 /(maxPlace - 1) winPlacePerc = round(winPlacePerc / gap)* gap if winPlacePerc < 0: winPlacePerc = 0.0 if winPlacePerc > 1: winPlacePerc = 1.0 pred[i] = winPlacePerc<feature_engineering>
best_score_svm, best_params_svm = grid(clf_svm, parameters_svm) print(best_score_svm )
Titanic - Machine Learning from Disaster
1,316,433
df_test['winPlacePerc'] = pred<save_to_csv>
x = StandardScaler().fit_transform(X) X_train, X_test, y_train, y_test = train_test_split(x, Y, test_size =.2) X_train_reduced = PCA(n_components = 2 ).fit_transform(X_train) X_test_reduced = PCA(n_components= 2 ).fit_transform(X_test )
Titanic - Machine Learning from Disaster
1,316,433
submission = df_test[['Id', 'winPlacePerc']] submission.to_csv('submission.csv', index=False )<load_from_csv>
def boundaries(model, heading, best_params): Model = model(**best_params) Model.fit(X_train_reduced, y_train) X_set, y_set = np.concatenate([X_train_reduced, X_test_reduced], axis = 0), np.concatenate([y_train, y_test], axis = 0) X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01), np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01)) plt.contourf(X1, X2, Model.predict(np.array([X1.ravel() , X2.ravel() ] ).T ).reshape(X1.shape), alpha = 0.5, cmap = ListedColormap(( 'k', 'blue'))) plt.xlim(X1.min() , X1.max()) plt.ylim(X2.min() , X2.max()) for i, j in enumerate(np.unique(y_set)) : plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(( 'red', 'green'))(i), label = j) plt.xticks(fontsize = 3) plt.yticks(fontsize = 3 )
Titanic - Machine Learning from Disaster
1,316,433
def import_training_data(nrows=None): df=pd.read_csv('../input/train_V2.csv', sep=',', encoding='utf-8', nrows=nrows) df=df.drop(['rankPoints'],axis=1) return df def import_test_data(nrows=None): df_test=pd.read_csv('../input/test_V2.csv', sep=',', encoding='utf-8', nrows=nrows) df_test=df_test.drop(['rankPoints'],axis=1) return df_test<feature_engineering>
clf_svm = SVC(**best_params_svm) clf_svm.fit(npX,npy)
Titanic - Machine Learning from Disaster
1,316,433
df=import_training_data()<drop_column>
test_df = test[cols]
Titanic - Machine Learning from Disaster
1,316,433
df_squad=df[(df['matchType'].str.contains('squad')) |(df['matchType'].str.contains('flare')) ].drop('matchType',axis=1) df_duo=df[df['matchType'].str.contains('duo')].drop('matchType',axis=1) df_solo=df[df['matchType'].str.contains('solo')].drop(columns=['DBNOs','revives'] ).drop('matchType',axis=1) df_crash=df[df['matchType'].str.contains('crash')].drop('matchType',axis=1 ).drop('killPoints', axis=1 ).drop('winPoints', axis=1 )<drop_column>
nptest = np.array(test_df) pred = clf_svm.predict(nptest)
Titanic - Machine Learning from Disaster
1,316,433
df_test=import_test_data() df_test_squad=df_test[(df_test['matchType'].str.contains('squad')) |(df_test['matchType'].str.contains('flare')) ].drop('matchType',axis=1) df_test_duo=df_test[df_test['matchType'].str.contains('duo')].drop('matchType',axis=1) df_test_solo=df_test[df_test['matchType'].str.contains('solo')].drop('matchType',axis=1 ).drop(columns=['DBNOs','revives']) df_test_crash=df_test[df_test['matchType'].str.contains('crash')].drop('matchType',axis=1 ).drop('killPoints', axis=1 ).drop('winPoints', axis=1 )<drop_column>
predictions = pd.DataFrame(pred, index = test_df.index, columns = ['Survived'])
Titanic - Machine Learning from Disaster
1,316,433
len_df_test=len(df_test) del df del df_test<sort_values>
pred_df=pd.DataFrame(test['PassengerId']) pred_df['Survived']=predictions
Titanic - Machine Learning from Disaster
1,316,433
<groupby><EOS>
pred_df.to_csv('predictions_svm_titanic_final.csv',index=False )
Titanic - Machine Learning from Disaster
6,515,315
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<train_model>
import numpy as np import pandas as pd import scipy.special import matplotlib.pyplot as plt import os import random
Titanic - Machine Learning from Disaster
6,515,315
def prepare_pca(matches_combined_teams, in_components=None): print(len(matches_combined_teams[1].drop(['winPlacePerc'],axis=1 ).columns)) ipca = IncrementalPCA(n_components=in_components) for i in matches_combined_teams: ipca.partial_fit(i.drop(['winPlacePerc'],axis=1 ).values) print(ipca.explained_variance_ratio_) print(ipca.n_components_) return ipca def prepare_pca_input(df_train, df_test, dfeatures=None): df_train_in=[] df_train_groupids=[] for i in df_train: df_train_groupids.append(i[['groupId','maxPlace']]) if len(dfeatures)>0: df_train_in.append(i.drop(['groupId','maxPlace'],axis=1 ).drop(dfeatures,axis=1 ).reindex(sorted(i.drop(['groupId','maxPlace'],axis=1 ).drop(dfeatures,axis=1 ).columns), axis=1)) else: df_train_in.append(i.drop(['groupId','maxPlace'],axis=1 ).reindex(sorted(i.drop(['groupId','maxPlace'],axis=1 ).columns), axis=1)) df_test_in=[] df_test_groupids=[] for df, dic in df_test: df_test_groupids.append([df[['groupId','maxPlace']],dic]) if len(dfeatures)>0: df_test_in.append(df.drop(['groupId','maxPlace'],axis=1 ).drop(dfeatures,axis=1 ).reindex(sorted(df.drop(['groupId','maxPlace'],axis=1 ).drop(dfeatures,axis=1 ).columns), axis=1)) else: df_test_in.append(df.drop(['groupId','maxPlace'],axis=1 ).reindex(sorted(df.drop(['groupId','maxPlace'],axis=1 ).columns), axis=1)) return df_train_in, df_train_groupids, df_test_in, df_test_groupids def pca_transform_data(matches_combined_teams, matches_combined_teams_test, ipca): x_train_pca = [] for i in matches_combined_teams: x_train_pca.append(ipca.transform(i.drop('winPlacePerc',axis=1 ).values)) y_train_pca=[] for i in matches_combined_teams: y_train_pca.append(i['winPlacePerc'].values.reshape(-1, 1)) x_test_pca = [] for i in matches_combined_teams_test: x_test_pca.append(ipca.transform(i.values)) return x_train_pca, y_train_pca, x_test_pca<normalization>
from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.preprocessing import LabelEncoder, MinMaxScaler from sklearn.metrics import accuracy_score from xgboost import XGBClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier
Titanic - Machine Learning from Disaster
6,515,315
def scale_data(x_train_pca, x_test_pca): scaler = MinMaxScaler() for i in x_train_pca: scaler.partial_fit(i) x_train_scaled=[] for i in x_train_pca: x_train_scaled.append(scaler.transform(i)) x_test_scaled=[] for i in x_test_pca: x_test_scaled.append(scaler.transform(i)) return x_train_scaled, x_test_scaled<train_model>
warnings.filterwarnings("ignore" )
Titanic - Machine Learning from Disaster
6,515,315
def mlp_evaluate(x_train, y_train, df_train_groupids, training_level=100, verbose=False): working_sample=np.random.choice(len(x_train), training_level*5) mlp=MLPRegressor(solver='adam',hidden_layer_sizes=(50,50,)) train_counter=0 lenxtrain=len(x_train) kfold_score=[] for k in range(training_level): for i in range(4): mlp.partial_fit(x_train[working_sample[train_counter]], y_train[working_sample[train_counter]].ravel()) train_counter+=1 y_pred_kfold=pd.DataFrame(mlp.predict(x_train[working_sample[train_counter]]), columns=['ranks']) y_pred_kfold=y_pred_kfold.join(df_train_groupids[working_sample[train_counter]] ).sort_values(by='ranks', ascending=False) y_test=pd.DataFrame(y_train[working_sample[train_counter]], columns=['winPlacePerc'] ).join(df_train_groupids[working_sample[train_counter]] ).sort_values(by='groupId', ascending=True) tmp_winPlacePerc=[] for i in range(1,len(y_pred_kfold)+1): tmp_winPlacePerc.append([y_pred_kfold['groupId'].values[i-1],(len(y_pred_kfold)-i)/(len(y_pred_kfold)-1)]) tmp_winPlacePerc=pd.DataFrame(tmp_winPlacePerc, columns=['groupId','winPlacePerc'] ).sort_values(by='groupId', ascending=True) kfold_score.append(mean_absolute_error(y_test['winPlacePerc'],tmp_winPlacePerc['winPlacePerc'])) train_counter+=1 if verbose: print("Evaluating "+str(k)+"/"+str(training_level)) print("Current mean absolute error: "+str(np.mean(kfold_score))) print("Average mean absolute error: "+str(np.mean(kfold_score))) return kfold_score<train_model>
train_data = pd.read_csv(path_in+'train.csv', index_col=0) test_data = pd.read_csv(path_in+'test.csv', index_col=0) samp_subm = pd.read_csv(path_in+'gender_submission.csv', index_col=0 )
Titanic - Machine Learning from Disaster
6,515,315
def mlp_train(x_train, y_train): mlp=MLPRegressor(solver='adam',hidden_layer_sizes=(50,50,)) lenxtrain=len(x_train) for i in range(lenxtrain): mlp.partial_fit(x_train[i], y_train[i].ravel()) return mlp<predict_on_test>
print('number train samples: ', len(train_data.index)) print('number test samples: ', len(test_data.index)) print('number features: ', len(train_data.columns)-1 )
Titanic - Machine Learning from Disaster
6,515,315
def predict_winPlacePerc(x_test, df_test_groupids, mlp, verbose=False): winPlacePerc=[] num_matches=len(x_test) for k in range(num_matches): if verbose and k%100==0: print("Predicting match "+str(k)+"/"+str(num_matches)) y_pred=pd.DataFrame(mlp.predict(x_test[k]), columns=['ranks']) y_pred=y_pred.join(df_test_groupids[k][0] ).sort_values(by='ranks', ascending=False) tmp_winPlacePerc=[] for i in range(1,len(y_pred)+1): tmp_winPlacePerc.append([y_pred['groupId'].values[i-1],(len(y_pred)-i)/(len(y_pred)-1)]) winPlacePerc.append(pd.DataFrame(tmp_winPlacePerc, columns=['groupId','winPlacePerc'] ).sort_values(by='groupId', ascending=True)) result_list=[] counter=0 for df, el in df_test_groupids: match_results=[] for i, gid in el.items() : wpp=winPlacePerc[counter][winPlacePerc[counter]['groupId']==gid]['winPlacePerc'].values match_results.append([i,wpp]) result_list.append(pd.DataFrame(match_results, columns=['Id','winPlacePerc'])) counter+=1 winPlacePerc_tot=pd.concat(result_list) winPlacePerc_tot['winPlacePerc']=winPlacePerc_tot['winPlacePerc'].str[0] return winPlacePerc_tot<define_variables>
train_data.isnull().sum()
Titanic - Machine Learning from Disaster
6,515,315
def predict_one_team_matches(df): result_list=[] counter=0 for df, dic in df: match_results=[] for i, gid in dic.items() : match_results.append([i,0.0]) result_list.append(pd.DataFrame(match_results, columns=['Id','winPlacePerc'])) counter+=1 if len(result_list)>0: return pd.concat(result_list) else: return [] def predict_two_team_matches(df): result_list=[] counter=0 for df, dic in df: match_results=[] killpointsMax=np.max([df['killPlace'].values[0],df['killPlace'].values[1]]) maxKillpointsGId=df[df['killPlace']==killpointsMax]['groupId'].values[0] for i, gid in dic.items() : if maxKillpointsGId in gid: match_results.append([i,0.0]) else: match_results.append([i,1.0]) result_list.append(pd.DataFrame(match_results, columns=['Id','winPlacePerc'])) counter+=1 if len(result_list)>0: return pd.concat(result_list) else: return []<concatenate>
cols_with_missing_train = [col for col in train_data.columns if train_data[col].isnull().any() ] cols_with_missing_test = [col for col in test_data.columns if test_data[col].isnull().any() ]
Titanic - Machine Learning from Disaster
6,515,315
df_red_squad=combine_training_teams(df_squad )<normalization>
print('train missing values: {:0.2f}%'.format(100*train_data['Age'].isna().sum() /len(train_data))) print('test missing values: {:0.2f}%'.format(100*test_data['Age'].isna().sum() /len(test_data)) )
Titanic - Machine Learning from Disaster
6,515,315
df_red_squad_1, df_red_squad_2, df_red_squad=filter_small_teams(df_red_squad )<concatenate>
def fill_age(s): if np.isnan(s)== False: return s else: return random.randrange(age_mean-age_std, age_mean+age_std )
Titanic - Machine Learning from Disaster
6,515,315
df_test_per_match_squad = combine_testing_teams(df_test_squad )<split>
train_data['Age'] = train_data['Age'].apply(fill_age) test_data['Age'] = test_data['Age'].apply(fill_age )
Titanic - Machine Learning from Disaster
6,515,315
df_test_per_match_squad_1, df_test_per_match_squad_2, df_test_per_match_squad=filter_small_teams_test(df_test_per_match_squad )<set_options>
print('train missing values: {:0.2f}%'.format(100*train_data['Cabin'].isna().sum() /len(train_data))) print('test missing values: {:0.2f}%'.format(100*test_data['Cabin'].isna().sum() /len(test_data)) )
Titanic - Machine Learning from Disaster
6,515,315
df_squad.drop(['killPoints','maxPlace','roadKills','teamKills','vehicleDestroys','winPoints','swimDistance'],axis=1 ).corr().style.format("{:.2}" ).background_gradient(cmap=plt.get_cmap('coolwarm'), axis=1 )<drop_column>
train_data['Cabin'] = train_data['Cabin'].fillna('Unknown', inplace=False) test_data['Cabin'] = test_data['Cabin'].fillna('Unknown', inplace=False )
Titanic - Machine Learning from Disaster
6,515,315
del df_squad del df_test_squad<define_variables>
print('train missing values:', train_data['Embarked'].isna().sum() )
Titanic - Machine Learning from Disaster
6,515,315
drop_features_squad=['killPoints','roadKills','teamKills','vehicleDestroys','winPoints']<split>
train_data['Embarked'] = train_data['Embarked'].fillna('Unknown', inplace=False )
Titanic - Machine Learning from Disaster
6,515,315
df_train_in_squad, df_train_groupids_squad, df_test_in_squad, df_test_groupids_squad = prepare_pca_input(df_red_squad, df_test_per_match_squad, drop_features_squad )<concatenate>
print('test missing values:', test_data['Fare'].isna().sum() )
Titanic - Machine Learning from Disaster
6,515,315
pca_squad=prepare_pca(df_train_in_squad, in_components=None )<prepare_x_and_y>
mean = test_data['Fare'].mean() test_data['Fare'] = test_data['Fare'].fillna(mean, inplace=False )
Titanic - Machine Learning from Disaster
6,515,315
x_train_pca_squad, y_train_pca_squad, x_test_pca_squad=pca_transform_data(df_train_in_squad, df_test_in_squad, pca_squad )<drop_column>
train_data['FamilySize'] = train_data['SibSp'] + train_data['Parch'] + 1 test_data['FamilySize'] = test_data['SibSp'] + test_data['Parch'] + 1
Titanic - Machine Learning from Disaster
6,515,315
del df_red_squad del df_test_in_squad del df_train_in_squad<normalization>
def family_group(s): if(s >= 2)&(s <= 4): return 2 elif(( s > 4)&(s <= 7)) |(s == 1): return 1 elif(s > 7): return 0
Titanic - Machine Learning from Disaster
6,515,315
x_train_scaled_squad, x_test_scaled_squad=scale_data(x_train_pca_squad, x_test_pca_squad )<compute_test_metric>
train_data['FamilyGroup'] = train_data['FamilySize'].apply(family_group) test_data['FamilyGroup'] = test_data['FamilySize'].apply(family_group )
Titanic - Machine Learning from Disaster
6,515,315
eval_score_squad=mlp_evaluate(x_train_scaled_squad, y_train_pca_squad, df_train_groupids_squad, training_level=10000, verbose=False )<train_model>
def IsAlone(s): if s==1: return 1 else: return 0
Titanic - Machine Learning from Disaster
6,515,315
mlp_squad=mlp_train(x_train_scaled_squad, y_train_pca_squad )<predict_on_test>
train_data['IsAlone'] = train_data['FamilySize'].apply(IsAlone) test_data['IsAlone'] = test_data['FamilySize'].apply(IsAlone )
Titanic - Machine Learning from Disaster
6,515,315
winPlacePerc_one_team_squad=predict_one_team_matches(df_test_per_match_squad_1) winPlacePerc_two_teams_squad=predict_two_team_matches(df_test_per_match_squad_2 )<predict_on_test>
ticket_group = dict(( train_data['Ticket'].append(test_data['Ticket'])).value_counts()) train_data['TicketGroup'] = train_data['Ticket'].apply(lambda x: ticket_group[x]) test_data['TicketGroup'] = test_data['Ticket'].apply(lambda x: ticket_group[x] )
Titanic - Machine Learning from Disaster
6,515,315
winPlacePerc_squad = predict_winPlacePerc(x_test_scaled_squad, df_test_groupids_squad, mlp_squad, verbose=False) winPlacePerc_squad = winPlacePerc_squad.append(winPlacePerc_two_teams_squad )<concatenate>
train_data['Title'] = train_data['Name'].apply(lambda x:x.split(',')[1].split('.')[0].strip()) test_data['Title'] = test_data['Name'].apply(lambda x:x.split(',')[1].split('.')[0].strip()) title_dict = {} title_dict.update(dict.fromkeys(['Capt', 'Col', 'Major', 'Dr', 'Rev'], 'Officer')) title_dict.update(dict.fromkeys(['Don', 'Sir', 'the Countess', 'Dona', 'Lady'], 'Royalty')) title_dict.update(dict.fromkeys(['Mme', 'Ms', 'Mrs'], 'Mrs')) title_dict.update(dict.fromkeys(['Mlle', 'Miss'], 'Miss')) title_dict.update(dict.fromkeys(['Mr'], 'Mr')) title_dict.update(dict.fromkeys(['Master','Jonkheer'], 'Master')) train_data['Title'] = train_data['Title'].map(title_dict) test_data['Title'] = test_data['Title'].map(title_dict )
Titanic - Machine Learning from Disaster
6,515,315
df_red_duo=combine_training_teams(df_duo )<normalization>
def age_group(s): if s == 0: return -1 elif(s > 0)&(s <= 13): return 1 elif(s > 13)&(s <= 20): return 2 elif(s > 20)&(s <= 30): return 3 elif(s > 30)&(s <= 40): return 4 elif(s > 40)&(s <= 50): return 5 elif(s > 50)&(s <= 60): return 6 elif(s > 60)&(s <= 70): return 7 elif(s > 70)&(s <= 80): return 8
Titanic - Machine Learning from Disaster
6,515,315
df_red_duo_1, df_red_duo_2, df_red_duo=filter_small_teams(df_red_duo )<concatenate>
train_data['AgeGroup'] = train_data['Age'].apply(age_group) test_data['AgeGroup'] = test_data['Age'].apply(age_group )
Titanic - Machine Learning from Disaster
6,515,315
df_test_per_match_duo = combine_testing_teams(df_test_duo )<split>
train_data['FareGroup'] = train_data['Fare'].apply(fare_group) test_data['FareGroup'] = test_data['Fare'].apply(fare_group )
Titanic - Machine Learning from Disaster
6,515,315
df_test_per_match_duo_1, df_test_per_match_duo_2, df_test_per_match_duo=filter_small_teams_test(df_test_per_match_duo )<set_options>
train_data['LastName'] = train_data['Name'].apply(last_name) test_data['LastName'] = test_data['Name'].apply(last_name )
Titanic - Machine Learning from Disaster
6,515,315
df_duo.drop(['killPoints','roadKills','teamKills','vehicleDestroys','winPoints'],axis=1 ).corr().style.format("{:.2}" ).background_gradient(cmap=plt.get_cmap('coolwarm'), axis=1 )<drop_column>
for row in train_data.index: if train_data.loc[row, 'Sex']=='female' or train_data.loc[row, 'Age']<14: train_data.loc[row, 'WomenOrChild'] = 1 else: train_data.loc[row, 'WomenOrChild'] = 0 for row in test_data.index: if test_data.loc[row, 'Sex']=='female' or test_data.loc[row, 'Age']<14: test_data.loc[row, 'WomenOrChild'] = 1 else: test_data.loc[row, 'WomenOrChild'] = 0
Titanic - Machine Learning from Disaster
6,515,315
del df_duo del df_test_duo<define_variables>
train_data['FarePerPerson']=train_data['Fare']/(train_data['FamilySize']+1) test_data['FarePerPerson']=test_data['Fare']/(test_data['FamilySize']+1 )
Titanic - Machine Learning from Disaster
6,515,315
drop_features_duo=['killPoints','roadKills','teamKills','vehicleDestroys','winPoints']<split>
train_data['AgePclass'] = train_data['Age']*train_data['Pclass'] test_data['AgePclass'] = test_data['Age']*test_data['Pclass']
Titanic - Machine Learning from Disaster
6,515,315
df_train_in_duo, df_train_groupids_duo, df_test_in_duo, df_test_groupids_duo = prepare_pca_input(df_red_duo, df_test_per_match_duo, dfeatures=drop_features_duo )<concatenate>
train_data['Cabin'] = train_data['Cabin'].str[0] test_data['Cabin'] = test_data['Cabin'].str[0]
Titanic - Machine Learning from Disaster
6,515,315
pca_duo=prepare_pca(df_train_in_duo, in_components=None )<prepare_x_and_y>
features_cat = ['Sex', 'Cabin', 'Embarked', 'Title'] le = LabelEncoder() for col in features_cat: le.fit(train_data[col]) train_data[col] = le.transform(train_data[col]) test_data[col] = le.transform(test_data[col] )
Titanic - Machine Learning from Disaster
6,515,315
x_train_pca_duo, y_train_pca_duo, x_test_pca_duo=pca_transform_data(df_train_in_duo, df_test_in_duo, pca_duo )<drop_column>
train_data_dummy = pd.get_dummies(train_data['Pclass'], prefix = 'Pclass') train_data = pd.concat([train_data, train_data_dummy], axis=1) del train_data['Pclass'] test_data_dummy = pd.get_dummies(test_data['Pclass'], prefix = 'Pclass') test_data = pd.concat([test_data, test_data_dummy], axis=1) del test_data['Pclass']
Titanic - Machine Learning from Disaster
6,515,315
del df_red_duo del df_test_in_duo del df_train_in_duo<normalization>
no_features = ['Survived', 'Name', 'Ticket', 'LastName']
Titanic - Machine Learning from Disaster
6,515,315
x_train_scaled_duo, x_test_scaled_duo=scale_data(x_train_pca_duo, x_test_pca_duo )<compute_test_metric>
X_train = train_data[train_data.columns.difference(no_features)].copy(deep=False) y_train = train_data['Survived'] X_test = test_data[test_data.columns.difference(no_features)].copy(deep=False )
Titanic - Machine Learning from Disaster
6,515,315
eval_score_duo=mlp_evaluate(x_train_scaled_duo, y_train_pca_duo, df_train_groupids_duo, training_level=10000, verbose=False )<train_model>
min_max = MinMaxScaler() X_train_scaled = min_max.fit_transform(X_train) X_test_scaled = min_max.transform(X_test )
Titanic - Machine Learning from Disaster
6,515,315
mlp_duo=mlp_train(x_train_scaled_duo, y_train_pca_duo )<predict_on_test>
param_grid = {'criterion': ['gini'], 'max_features': [None, 'auto', 'sqrt', 'log2'], 'max_depth': [i for i in range(1, 6)], 'class_weight': [None, 'balanced'], 'min_samples_split': [2, 4, 6, 8, 10 ,12], 'min_samples_leaf': [1, 2, 3, 4], 'random_state': [2020]} grid = GridSearchCV(DecisionTreeClassifier() , param_grid=param_grid, cv=5, scoring='accuracy') grid.fit(X_train_scaled, y_train) best_params = grid.best_params_ print('Best score of cross validation: {:.2f}'.format(grid.best_score_)) print('Best parameters:', best_params )
Titanic - Machine Learning from Disaster
6,515,315
winPlacePerc_one_team_duo=predict_one_team_matches(df_test_per_match_duo_1) winPlacePerc_two_teams_duo=predict_two_team_matches(df_test_per_match_duo_2 )<predict_on_test>
model = DecisionTreeClassifier() model.set_params(**best_params )
Titanic - Machine Learning from Disaster
6,515,315
winPlacePerc_duo = predict_winPlacePerc(x_test_scaled_duo, df_test_groupids_duo, mlp_duo, verbose=False) winPlacePerc_duo=winPlacePerc_duo.append(winPlacePerc_one_team_duo )<set_options>
model.fit(X_train_scaled, y_train )
Titanic - Machine Learning from Disaster
6,515,315
df_solo['winPlacePerc'].fillna(0.0, inplace=True )<concatenate>
y_test = model.predict(X_test_scaled )
Titanic - Machine Learning from Disaster
6,515,315
df_red_solo=combine_training_teams(df_solo )<normalization>
output = pd.DataFrame({'PassengerId': samp_subm.index, 'Survived': y_test}) output.to_csv('submission.csv', index=False )
Titanic - Machine Learning from Disaster
6,515,315
<concatenate><EOS>
output['Survived'].value_counts()
Titanic - Machine Learning from Disaster
10,374,790
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<split>
sns.set(context='notebook', style='white', palette='colorblind' )
Titanic - Machine Learning from Disaster
10,374,790
df_test_per_match_solo_1, df_test_per_match_solo_2, df_test_per_match_solo=filter_small_teams_test(df_test_per_match_solo )<drop_column>
from sklearn.preprocessing import StandardScaler, LabelEncoder from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier from sklearn.neighbors import KNeighborsClassifier from xgboost import XGBClassifier from sklearn.model_selection import StratifiedKFold, GridSearchCV, learning_curve, cross_val_score
Titanic - Machine Learning from Disaster
10,374,790
del df_solo del df_test_solo<define_variables>
train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv') df = pd.concat([train, test], axis=0, ignore_index=True) print(f'Train:{train.shape} Test:{test.shape} Df:{df.shape}' )
Titanic - Machine Learning from Disaster
10,374,790
drop_features_solo=['killPoints','roadKills','teamKills','winPoints']<split>
df.isna().sum() [df.isna().sum() >0]
Titanic - Machine Learning from Disaster
10,374,790
df_train_in_solo, df_train_groupids_solo, df_test_in_solo, df_test_groupids_solo = prepare_pca_input(df_red_solo, df_test_per_match_solo, dfeatures=drop_features_solo )<concatenate>
df.Sex=df.Sex.map({'male':0, 'female':1} ).astype('int' )
Titanic - Machine Learning from Disaster
10,374,790
pca_solo=prepare_pca(df_train_in_solo, in_components=None )<prepare_x_and_y>
df['Family_Size']=df.SibSp + df.Parch df.groupby('Family_Size')['Survived'].mean()
Titanic - Machine Learning from Disaster
10,374,790
x_train_pca_solo, y_train_pca_solo, x_test_pca_solo=pca_transform_data(df_train_in_solo, df_test_in_solo, pca_solo )<drop_column>
df.Title.value_counts()
Titanic - Machine Learning from Disaster
10,374,790
del df_red_solo del df_test_in_solo del df_train_in_solo<normalization>
df['Family_Size'] = df['Parch'] + df['SibSp']
Titanic - Machine Learning from Disaster
10,374,790
x_train_scaled_solo, x_test_scaled_solo=scale_data(x_train_pca_solo, x_test_pca_solo )<compute_test_metric>
df['Last_Name'] = df['Name'].apply(lambda x: str.split(x, ",")[0]) df['Fare'].fillna(df['Fare'].mean() , inplace=True) DEFAULT_SURVIVAL_VALUE = 0.5 df['Family_Survival'] = DEFAULT_SURVIVAL_VALUE for grp, grp_df in df[['Survived','Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId', 'SibSp', 'Parch', 'Age', 'Cabin']].groupby(['Last_Name', 'Fare']): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): df.loc[df['PassengerId'] == passID, 'Family_Survival'] = 1 elif(smin==0.0): df.loc[df['PassengerId'] == passID, 'Family_Survival'] = 0 for _, grp_df in df.groupby('Ticket'): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : if(row['Family_Survival'] == 0)|(row['Family_Survival']== 0.5): smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): df.loc[df['PassengerId'] == passID, 'Family_Survival'] = 1 elif(smin==0.0): df.loc[df['PassengerId'] == passID, 'Family_Survival'] = 0
Titanic - Machine Learning from Disaster
10,374,790
eval_score_solo=mlp_evaluate(x_train_scaled_solo, y_train_pca_solo, df_train_groupids_solo, training_level=10000, verbose=False )<train_model>
df['Family_Survival'].value_counts()
Titanic - Machine Learning from Disaster
10,374,790
mlp_solo=mlp_train(x_train_scaled_solo, y_train_pca_solo )<predict_on_test>
df['Fare'].fillna(df['Fare'].median() , inplace = True) df['FareBin'] = pd.qcut(df['Fare'], 5) label = LabelEncoder() df['FareBin_Code'] = label.fit_transform(df['FareBin']) df.drop(['Fare'], 1, inplace=True )
Titanic - Machine Learning from Disaster
10,374,790
winPlacePerc_one_teams_solo=predict_one_team_matches(df_test_per_match_solo_1) winPlacePerc_two_teams_solo=predict_two_team_matches(df_test_per_match_solo_2 )<predict_on_test>
df['FareBin_Code'].value_counts()
Titanic - Machine Learning from Disaster
10,374,790
winPlacePerc_solo = predict_winPlacePerc(x_test_scaled_solo, df_test_groupids_solo, mlp_solo, verbose=False) winPlacePerc_solo = winPlacePerc_solo.append(winPlacePerc_one_teams_solo ).append(winPlacePerc_two_teams_solo )<concatenate>
titles = ['Dr', 'Master', 'Miss', 'Mr', 'Mrs', 'Rev'] for title in titles: age_to_impute = df.groupby('Title')['Age'].median() [titles.index(title)] df.loc[(df['Age'].isnull())&(df['Title'] == title), 'Age'] = age_to_impute
Titanic - Machine Learning from Disaster
10,374,790
df_red_crash=combine_training_teams(df_crash )<feature_engineering>
df['AgeBin'] = pd.qcut(df['Age'], 4) label = LabelEncoder() df['AgeBin_Code'] = label.fit_transform(df['AgeBin'] )
Titanic - Machine Learning from Disaster
10,374,790
df_red_crash_1, df_red_crash_2, df_red_crash=filter_small_teams(df_red_crash )<concatenate>
df['AgeBin_Code'].value_counts()
Titanic - Machine Learning from Disaster
10,374,790
df_test_per_match_crash = combine_testing_teams(df_test_crash )<split>
df.drop(['Name', 'PassengerId', 'SibSp', 'Parch', 'Ticket', 'Cabin', 'Embarked', 'Last_Name', 'FareBin', 'AgeBin', 'Survived', 'Title', 'Age'], axis = 1, inplace = True )
Titanic - Machine Learning from Disaster