| kernel_id | prompt | completion | comp_name |
|---|---|---|---|
10,374,790 | df_test_per_match_crash_1, df_test_per_match_crash_2, df_test_per_match_crash=filter_small_teams_test(df_test_per_match_crash )<set_options> | X_train = df[:len(train)]
X_test = df[len(train):]
y_train = train['Survived'] | Titanic - Machine Learning from Disaster |
10,374,790 | df_crash.drop(['headshotKills','numGroups','swimDistance','teamKills'],axis=1 ).corr().style.format("{:.2}" ).background_gradient(cmap=plt.get_cmap('coolwarm'), axis=1 )<drop_column> | scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test ) | Titanic - Machine Learning from Disaster |
10,374,790 | del df_crash
del df_test_crash<define_variables> | kfold = StratifiedKFold(n_splits=8 ) | Titanic - Machine Learning from Disaster |
10,374,790 | drop_features_crash=['headshotKills','swimDistance','teamKills']<split> | RFC = RandomForestClassifier()
rf_param_grid = {"max_depth": [None],
"max_features": [3,"sqrt", "log2"],
"min_samples_split": [n for n in range(1, 9)],
"min_samples_leaf": [5, 7],
"bootstrap": [False, True],
"n_estimators" :[200, 500],
"criterion": ["gini", "entropy"]}
rf_param_grid_best = {"max_depth": [None],
"max_features": [3],
"min_samples_split": [4],
"min_samples_leaf": [5],
"bootstrap": [False],
"n_estimators" :[200],
"criterion": ["gini"]}
gs_rf = GridSearchCV(RFC, param_grid = rf_param_grid_best, cv=kfold, scoring="roc_auc", n_jobs= 4, verbose = 1)
gs_rf.fit(X_train, y_train)
rf_best = gs_rf.best_estimator_
RFC.fit(X_train, y_train ) | Titanic - Machine Learning from Disaster |
10,374,790 | df_train_in_crash, df_train_groupids_crash, df_test_in_crash, df_test_groupids_crash = prepare_pca_input(df_red_crash, df_test_per_match_crash, dfeatures=drop_features_crash)<concatenate> | print(f'RandomForest GridSearch best params: {gs_rf.best_params_}\n')
print(f'RandomForest GridSearch best score: {gs_rf.best_score_}')
print(f'RandomForest score: {RFC.score(X_train,y_train)}' ) | Titanic - Machine Learning from Disaster |
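The rows above all follow one pattern: wrap an estimator in `GridSearchCV` with a `StratifiedKFold` splitter and a `roc_auc` scorer, then keep `best_estimator_` (refit on the full training set). A minimal, self-contained sketch of that pattern; the synthetic data from `make_classification` is an assumption for illustration, not part of the kernel:

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, StratifiedKFold

# Toy stand-in for the Titanic features/labels used in the kernel.
X, y = make_classification(n_samples=200, random_state=0)

kfold = StratifiedKFold(n_splits=8)
param_grid = {"n_estimators": [200], "criterion": ["gini"]}

gs = GridSearchCV(RandomForestClassifier(), param_grid=param_grid,
                  cv=kfold, scoring="roc_auc", n_jobs=4, verbose=1)
gs.fit(X, y)
best_model = gs.best_estimator_  # refit on all of X, y by default
print(gs.best_params_, gs.best_score_)
```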
10,374,790 | pca_crash=prepare_pca(df_train_in_crash, in_components=None )<prepare_x_and_y> | KNN = KNeighborsClassifier()
knn_param_grid = {'algorithm': ['auto'],
'weights': ['uniform', 'distance'],
'leaf_size': [20, 25, 30],
'n_neighbors': [12, 14, 16]}
knn_best_param_grid = {'algorithm': ['auto'],
'weights': ['uniform'],
'leaf_size': [25],
'n_neighbors': [14]}
gs_knn = GridSearchCV(KNN, param_grid = knn_best_param_grid, cv=kfold, scoring = "roc_auc", n_jobs= 4, verbose = 1)
gs_knn.fit(X_train, y_train)
KNN.fit(X_train, y_train)
knn_best = gs_knn.best_estimator_ | Titanic - Machine Learning from Disaster |
10,374,790 | x_train_pca_crash, y_train_pca_crash, x_test_pca_crash=pca_transform_data(df_train_in_crash, df_test_in_crash, pca_crash )<drop_column> | print(f'KNN GridSearch best params: {gs_knn.best_params_}')
print()
print(f'KNN GridSearch best score: {gs_knn.best_score_}')
print(f'KNN score: {KNN.score(X_train,y_train)}' ) | Titanic - Machine Learning from Disaster |
10,374,790 | del df_red_crash
del df_test_in_crash
del df_train_in_crash<normalization> | knn1 = KNeighborsClassifier(algorithm='auto', leaf_size=26, metric='minkowski',
metric_params=None, n_jobs=1, n_neighbors=6, p=2,
weights='uniform')
knn1.fit(X_train, y_train ) | Titanic - Machine Learning from Disaster |
10,374,790 | x_train_scaled_crash, x_test_scaled_crash=scale_data(x_train_pca_crash, x_test_pca_crash )<compute_test_metric> | print(f'KNN score - 2nd model: {knn1.score(X_train, y_train)}' ) | Titanic - Machine Learning from Disaster |
10,374,790 | eval_score_crash=mlp_evaluate(x_train_scaled_crash, y_train_pca_crash, df_train_groupids_crash, training_level=len(x_train_scaled_crash), verbose=False )<train_model> | GB = GradientBoostingClassifier()
gb_param_grid = {'loss' : ["deviance"],
'n_estimators' : [1000],
'learning_rate': [0.02, 0.05],
'min_samples_split': [15, 20, 25],
'max_depth': [4, 6],
'min_samples_leaf': [50, 60],
'max_features': ["sqrt"]
}
gb_param_grid_best = {'loss' : ["deviance"],
'n_estimators' : [1000],
'learning_rate': [0.02],
'min_samples_split': [25],
'max_depth': [4],
'min_samples_leaf': [60],
'max_features': ["sqrt"]
}
gs_gb = GridSearchCV(GB, param_grid = gb_param_grid_best, cv=kfold, scoring="roc_auc", n_jobs= 4, verbose = 1)
gs_gb.fit(X_train,y_train)
GB.fit(X_train, y_train)
gb_best = gs_gb.best_estimator_ | Titanic - Machine Learning from Disaster |
10,374,790 | mlp_crash=mlp_train(x_train_scaled_crash, y_train_pca_crash)<predict_on_test> | print(f'GradientBoost GridSearch best params: {gs_gb.best_params_}')
print()
print(f'GradientBoost GridSearch best score: {gs_gb.best_score_}')
print(f'GradientBoost score: {GB.score(X_train, y_train)}') | Titanic - Machine Learning from Disaster |
10,374,790 | winPlacePerc_one_teams_crash=predict_one_team_matches(df_test_per_match_crash_1)
winPlacePerc_two_teams_crash=predict_two_team_matches(df_test_per_match_crash_2 )<predict_on_test> | XGB = XGBClassifier()
xgb_param_grid = {'learning_rate':[0.05, 0.1],
'reg_lambda':[0.3, 0.5],
'gamma': [0.8, 1],
'subsample': [0.8, 1],
'max_depth': [2, 3],
'n_estimators': [200, 300]
}
xgb_param_grid_best = {'learning_rate':[0.1],
'reg_lambda':[0.3],
'gamma': [1],
'subsample': [0.8],
'max_depth': [2],
'n_estimators': [300]
}
gs_xgb = GridSearchCV(XGB, param_grid = xgb_param_grid_best, cv=kfold, scoring="roc_auc", n_jobs= 4, verbose = 1)
gs_xgb.fit(X_train,y_train)
XGB.fit(X_train, y_train)
xgb_best = gs_xgb.best_estimator_ | Titanic - Machine Learning from Disaster |
10,374,790 | winPlacePerc_crash = predict_winPlacePerc(x_test_scaled_crash, df_test_groupids_crash, mlp_crash, verbose=False )<compute_test_metric> | print(f'XGB GridSearch best params: {gs_xgb.best_params_}')
print()
print(f'XGB GridSearch best score: {gs_xgb.best_score_}')
print(f'XGB score: {XGB.score(X_train, y_train)}' ) | Titanic - Machine Learning from Disaster |
10,374,790 | <concatenate><EOS> | results=pd.DataFrame({'PassengerId':test['PassengerId'], 'Survived':knn1.predict(X_test)})
results.to_csv("Titanic_prediction.csv", index=False)
print('Done!' ) | Titanic - Machine Learning from Disaster |
721,688 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<drop_column> | sns.set_style('whitegrid')
%matplotlib inline
| Titanic - Machine Learning from Disaster |
721,688 | df_sub=df_sub.reset_index().drop('index',axis=1)<save_to_csv> | df_train=pd.read_csv('../input/train.csv')
df_test=pd.read_csv('../input/test.csv')
df_titanic=pd.concat([df_train,df_test]).reset_index(drop=True)
df_titanic.info()
| Titanic - Machine Learning from Disaster |
721,688 | df_sub.to_csv('submission.csv', index=False )<string_transform> | df_age=df_titanic["Age"].dropna()
mean_age=int(df_titanic["Age"].dropna().mean())
std_age=int(df_titanic["Age"].dropna().std())
df_titanic["Age"]=df_titanic["Age"].apply(lambda x: np.random.randint(int(mean_age-std_age),int(mean_age+std_age)) if pd.isnull(x)else x ) | Titanic - Machine Learning from Disaster |
721,688 | def take_part_of_data(df, part):
match_ids = df['matchId'].unique()
match_ids_part = np.random.choice(match_ids, int(part * len(match_ids)))
df = df[df['matchId'].isin(match_ids_part)]
del match_ids
del match_ids_part
return df<feature_engineering> | print(df_titanic[pd.isnull(df_titanic["Embarked"])] ) | Titanic - Machine Learning from Disaster |
721,688 | def add_new_features_1(df):
df['totalDistance'] = df['rideDistance'] + df["walkDistance"] + df["swimDistance"]
df['healsAndBoosts'] = df['heals'] + df['boosts']
df['headshotKillsOverKills'] = df['headshotKills'] / df['kills']
df['headshotKillsOverKills'].fillna(0, inplace=True)
df['killStreaksOverKills'] = df['killStreaks'] / df['kills']
df['killStreaksOverKills'].fillna(0, inplace=True)
df['killsAndAssists'] = df['kills'] + df['assists']
df['assistsAndRevives'] = df['assists'] + df['revives']<feature_engineering> | df_titanic["Embarked"]=df_titanic["Embarked"].fillna("C" ) | Titanic - Machine Learning from Disaster |
721,688 | def add_new_features_2(df):
df['playersJoined'] = df.groupby('matchId')['matchId'].transform('count')
df['killsAndAssistsOverPlayersJoined'] = df['killsAndAssists'] *(( 100 - df['playersJoined'])/ 100 + 1)
df['matchDurationOverPlayersJoined'] = df['matchDuration'] *(( 100 - df['playersJoined'])/ 100 + 1)
df['damageDealtOverPlayersJoined'] = df['damageDealt'] *(( 100 - df['playersJoined'])/ 100 + 1 )<feature_engineering> | print(df_titanic[pd.isnull(df_titanic["Fare"])])
mean=df_titanic[df_titanic["Embarked"]=='S']["Fare"].mean()
std=df_titanic[df_titanic["Embarked"]=='S']["Fare"].std()
df_titanic["Fare"]=df_titanic["Fare"].fillna(random.randint(int(mean-std),int(mean+std)) ) | Titanic - Machine Learning from Disaster |
721,688 | def add_new_features_3(df):
df['totalDistanceOverKillsAndAssists'] = df['totalDistance'] / df['killsAndAssists']
df['totalDistanceOverKillsAndAssists'].fillna(0, inplace=True)
df['totalDistanceOverKillsAndAssists'].replace(np.inf, 0, inplace=True)
df['totalDistanceOverHealsAndBoosts'] = df['totalDistance'] / df['healsAndBoosts']
df['totalDistanceOverHealsAndBoosts'].fillna(0, inplace=True)
df['totalDistanceOverHealsAndBoosts'].replace(np.inf, 0, inplace=True )<feature_engineering> | df_titanic.loc[df_titanic["Age"]<=16,"Sex"]="Child"
df_titanic.loc[(df_titanic["Age"]>16)&(df_titanic["Parch"]>0)&(df_titanic["Sex"]=="female"),"Sex"]="Mother" | Titanic - Machine Learning from Disaster |
721,688 | def add_new_features_4(df):
df['headshotRate'] = df['kills'] / df['headshotKills']
df['killStreakRate'] = df['killStreaks'] / df['kills']
df['healsAndBoosts'] = df['heals'] + df['boosts']
df['totalDistance'] = df['rideDistance'] + df['walkDistance'] + df['swimDistance']
df['killPlaceOverMaxPlace'] = df['killPlace'] / df['maxPlace']
df['headshotKillsOverKills'] = df['headshotKills'] / df['kills']
df['distanceOverWeapons'] = df['totalDistance'] / df['weaponsAcquired']
df['walkDistanceOverHeals'] = df['walkDistance'] / df['heals']
df['walkDistanceOverKills'] = df['walkDistance'] / df['kills']
df['killsPerWalkDistance'] = df['kills'] / df['walkDistance']
df["skill"] = df['headshotKills'] + df['roadKills']
df[df == np.Inf] = np.NaN
df[df == np.NINF] = np.NaN
df.fillna(0, inplace=True )<prepare_x_and_y> | df_titanic["Parch"].value_counts() | Titanic - Machine Learning from Disaster |
721,688 | def feature_engineering(df, is_train=True):
df['rankPoints'] = np.where(df['rankPoints'] <= 0, 0, df['rankPoints'])
features = list(df.columns)
features.remove("matchId")
features.remove("groupId")
features.remove("matchDuration")
features.remove("matchType")
if 'winPlacePerc' in features:
features.remove('winPlacePerc')
y = None
if is_train:
y = df.groupby(['matchId','groupId'])['winPlacePerc'].agg('mean')
elif 'winPlacePerc' in df.columns:
y = df['winPlacePerc']
agg = df.groupby(['matchId','groupId'])[features].agg('mean')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
if is_train:
df_out = agg.reset_index() [['matchId','groupId']]
else:
df_out = df[['matchId','groupId']]
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_mean", "_mean_rank"], how='left', on=['matchId', 'groupId'])
agg = df.groupby(['matchId','groupId'])[features].agg('max')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_max", "_max_rank"], how='left', on=['matchId', 'groupId'])
agg = df.groupby(['matchId','groupId'])[features].agg('min')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_min", "_min_rank"], how='left', on=['matchId', 'groupId'])
agg = df.groupby(['matchId','groupId'] ).size().reset_index(name='group_size')
df_out = df_out.merge(agg, how='left', on=['matchId', 'groupId'])
agg = df.groupby(['matchId'])[features].agg('mean' ).reset_index()
df_out = df_out.merge(agg, suffixes=["", "_match_mean"], how='left', on=['matchId'])
agg = df.groupby(['matchId'] ).size().reset_index(name='match_size')
df_out = df_out.merge(agg, how='left', on=['matchId'])
df_out.drop(["matchId", "groupId"], axis=1, inplace=True)
del agg, agg_rank
return df_out, y<predict_on_test> | df_titanic["SibSp"].value_counts() | Titanic - Machine Learning from Disaster |
721,688 | class Estimator(object):
def fit(self, x_train, y_train, x_valid, y_valid):
raise NotImplementedError
def predict(self, x):
raise NotImplementedException<train_model> | df_titanic["family"]=df_titanic["Parch"]+df_titanic["SibSp"]
df_titanic.loc[(df_titanic["family"]>=1)&(df_titanic["family"]<=3),"family"]=1
df_titanic.loc[df_titanic["family"] >3 |(df_titanic["family"]==0),"family"]=0 | Titanic - Machine Learning from Disaster |
721,688 | class ScikitLearnEstimator(Estimator):
def __init__(self, estimator):
self.estimator = estimator
def fit(self, x_train, y_train, x_valid, y_valid):
self.estimator.fit(x_train, y_train)
def predict(self, x):
return self.estimator.predict(x )<train_model> | df_name=pd.DataFrame(df_titanic["Name"].str.extract('([A-Za-z]+\.) '))
print(df_name["Name"].value_counts() ) | Titanic - Machine Learning from Disaster |
721,688 | def fit_predict_step(estimator, x_train, y_train, train_idx, valid_idx, x_test, oof):
x_train_train = x_train[train_idx]
y_train_train = y_train[train_idx]
x_train_valid = x_train[valid_idx]
y_train_valid = y_train[valid_idx]
estimator.fit(x_train_train, y_train_train, x_train_valid, y_train_valid)
oof_part = estimator.predict(x_train_valid)
print('MAE:', mean_absolute_error(y_train_valid, oof_part))
oof[valid_idx] = oof_part
y_part = estimator.predict(x_test)
return y_part<predict_on_test> | VIP=["Master.","Rev.","Dr.","Col.","Major.","Jonkheer.","Dona.","Capt.","Don.","Sir.","Lady.","Countess."]
df_name.loc[df_name["Name"].isin(VIP),"Name"]="VIP"
df_name.loc[df_name["Name"].isin(["Mlle.","Ms."]),"Name"]="Miss."
df_name.loc[df_name["Name"]=="Mme.","Name"]="Mrs." | Titanic - Machine Learning from Disaster |
721,688 | def fit_predict(estimator, x_train, y_train, x_test):
oof = np.zeros(x_train.shape[0])
y = np.zeros(x_test.shape[0])
kf = KFold(n_splits=5, random_state=42)
for train_idx, valid_idx in kf.split(x_train):
y_part = fit_predict_step(estimator, x_train, y_train, train_idx, valid_idx, x_test, oof)
y += y_part / kf.n_splits
print('Final MAE:', mean_absolute_error(y_train, oof))
return oof, y<train_model> | df_titanic["Name"]=df_name["Name"] | Titanic - Machine Learning from Disaster |
721,688 | def fit_step(estimator, x_train, y_train, train_idx, valid_idx, oof):
x_train_train = x_train[train_idx]
y_train_train = y_train[train_idx]
x_train_valid = x_train[valid_idx]
y_train_valid = y_train[valid_idx]
estimator.fit(x_train_train, y_train_train, x_train_valid, y_train_valid)
oof_part = estimator.predict(x_train_valid)
mae = mean_absolute_error(y_train_valid, oof_part)
print('MAE:', mae)
oof[valid_idx] = oof_part
return estimator, mae<train_model> | df_cabin=pd.DataFrame(df_titanic[["Cabin","Pclass","Fare"]].dropna())
df_cabin["Cabin"]=df_cabin["Cabin"].astype(str ).str[0] | Titanic - Machine Learning from Disaster |
721,688 | def fit(estimator, x_train, y_train):
oof = np.zeros(x_train.shape[0])
kf = KFold(n_splits=5, random_state=42)
trained_estimators = []
for train_idx, valid_idx in kf.split(x_train):
e, mae = fit_step(estimator, x_train, y_train, train_idx, valid_idx, oof)
trained_estimators.append(deepcopy(e))
print('Final MAE:', mean_absolute_error(y_train, oof))
return oof, trained_estimators<predict_on_test> | print(df_cabin["Cabin"].value_counts() ) | Titanic - Machine Learning from Disaster |
721,688 | def predict(trained_estimators, x_test):
y = np.zeros(x_test.shape[0])
for estimator in trained_estimators:
y_part = estimator.predict(x_test)
y += y_part / len(trained_estimators)
return y<train_model> | df_train=df_titanic.loc[0:890,]
df_test=df_titanic.loc[891:1308,] | Titanic - Machine Learning from Disaster |
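The `fit_predict`/`fit`/`predict` helpers above implement the standard out-of-fold (OOF) scheme: each fold's validation predictions fill the OOF vector, and test predictions are averaged across folds. A compact, self-contained sketch with random placeholder data (the original omits `shuffle=True`; it is added here so `random_state` takes effect):

```python
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import KFold

x_train, y_train = np.random.rand(100, 5), np.random.rand(100)
x_test = np.random.rand(20, 5)

oof = np.zeros(len(x_train))    # out-of-fold predictions on train
y_pred = np.zeros(len(x_test))  # fold-averaged test predictions
kf = KFold(n_splits=5, shuffle=True, random_state=42)
for trn_idx, val_idx in kf.split(x_train):
    model = Ridge().fit(x_train[trn_idx], y_train[trn_idx])
    oof[val_idx] = model.predict(x_train[val_idx])
    y_pred += model.predict(x_test) / kf.n_splits

print('Final MAE:', mean_absolute_error(y_train, oof))
```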
721,688 | def pipeline_fit(estimator, df_train, scaler=None):
add_new_features_1(df_train)
add_new_features_2(df_train)
add_new_features_3(df_train)
add_new_features_4(df_train)
x_train, y_train = feature_engineering(df_train, is_train=True)
x_train = reduce_mem_usage(x_train)
gc.collect()
if not(scaler is None):
scaler.fit(x_train)
scaled_x_train = scaler.transform(x_train)
else:
scaled_x_train = x_train.values
oof, trained_estimators = fit(estimator, scaled_x_train, y_train.values)
del x_train
del scaled_x_train
del y_train
gc.collect()
return oof, trained_estimators<predict_on_test> | X=df_train.drop(["PassengerId","Cabin","Ticket","Survived","SibSp","Parch"],axis=1)
y=df_train["Survived"]
Submission_set=df_test.drop(["PassengerId","Cabin","Ticket","Survived","SibSp","Parch"],axis=1 ) | Titanic - Machine Learning from Disaster |
721,688 | def pipeline_predict(trained_estimators, df_test, scaler=None):
add_new_features_1(df_test)
add_new_features_2(df_test)
add_new_features_3(df_test)
add_new_features_4(df_test)
x_test, _ = feature_engineering(df_test, is_train=False)
x_test = reduce_mem_usage(x_test)
gc.collect()
if not(scaler is None):
scaled_x_test = scaler.transform(x_test)
else:
scaled_x_test = x_test.values
y = predict(trained_estimators, scaled_x_test)
del x_test
del scaled_x_test
gc.collect()
return y<load_from_csv> | X=pd.get_dummies(X)
Submission_set=pd.get_dummies(Submission_set)
X.info()
X.describe() | Titanic - Machine Learning from Disaster |
721,688 | df_train = pd.read_csv('../input/train_V2.csv', index_col='Id')
df_train.shape<feature_engineering> | X=scale(X)
Submission_set=scale(Submission_set ) | Titanic - Machine Learning from Disaster |
721,688 | df_train = reduce_mem_usage(df_train )<set_options> | X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.2, random_state=42 ) | Titanic - Machine Learning from Disaster |
721,688 | gc.collect()<drop_column> | Neighbors=KNeighborsClassifier()
n_neighbors = np.arange(1,50)
param_grid = {'n_neighbors': n_neighbors,
'metric':["cityblock","euclidean"]} | Titanic - Machine Learning from Disaster |
721,688 | df_train.drop(df_train[df_train['winPlacePerc'].isnull() ].index, inplace=True )<import_modules> | Neighbors_cv = GridSearchCV(Neighbors,param_grid, cv=5)
Neighbors_cv.fit(X_train,y_train)
print("Tuned k-NN classifier Parameters: {}".format(Neighbors_cv.best_params_))
print("Best score is {}".format(Neighbors_cv.best_score_))
| Titanic - Machine Learning from Disaster |
721,688 | from sklearn.linear_model import LinearRegression<set_options> | y_pred=Neighbors_cv.predict(X_test ) | Titanic - Machine Learning from Disaster |
721,688 | del df_train
del df_oof
gc.collect()<load_from_csv> | print("Confusion Matrix :")
print(confusion_matrix(y_pred,y_test))
print("Classification report :")
print(classification_report(y_pred,y_test)) | Titanic - Machine Learning from Disaster |
721,688 | df_test = pd.read_csv('../input/test_V2.csv', index_col = 'Id')
df_test.shape<feature_engineering> | Sub_pred=Neighbors_cv.predict(Submission_set ).astype(int ) | Titanic - Machine Learning from Disaster |
721,688 | <create_dataframe><EOS> | submission = pd.DataFrame({
"PassengerId": df_test["PassengerId"],
"Survived": Sub_pred
})
submission.to_csv('titanic.csv', index=False ) | Titanic - Machine Learning from Disaster |
1,034,460 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<set_options> | if not sys.warnoptions:
warnings.simplefilter("ignore" ) | Titanic - Machine Learning from Disaster |
1,034,460 | gc.collect()<predict_on_test> | train = pd.read_csv('.. /input/train.csv')
test = pd.read_csv('.. /input/test.csv' ) | Titanic - Machine Learning from Disaster |
1,034,460 | %%time
df_y = pd.DataFrame(index=df_test.index)
df_y['y'] = 0
for matchType in matchTypes:
print("----------", matchType, "----------")
df_test_part = df_test[df_test['matchType'] == matchType]
y_part = pipeline_predict(trained_estimators[matchType], df_test_part, trained_scalers[matchType])
df_y_part = pd.DataFrame(index=df_test_part.index)
df_y_part['y'] = y_part
df_y.update(df_y_part)
y = df_y['y'].values<set_options> | ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train['Survived'].values
passId = test['PassengerId']
data = pd.concat(( train, test))
print("data size is: {}".format(data.shape)) | Titanic - Machine Learning from Disaster |
1,034,460 | del df_test
del df_y
gc.collect()<save_to_csv> | print(data.isnull().sum() ) | Titanic - Machine Learning from Disaster |
1,034,460 | df_oof = pd.DataFrame()
df_oof['linear_oof'] = oof
df_oof.to_csv('linear_oof.csv', index_label='id' )<save_to_csv> | data['Last_Name'] = data['Name'].apply(lambda x: str.split(x, ",")[0])
data['Fare'].fillna(data['Fare'].mean() , inplace=True)
default_survival_chance = 0.5
data['Family_Survival'] = default_survival_chance
for grp, grp_df in data[['Survived','Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId',
'SibSp', 'Parch', 'Age', 'Cabin']].groupby(['Last_Name', 'Fare']):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
data.loc[data['PassengerId'] == passID, 'Family_Survival'] = 1
elif(smin == 0.0):
data.loc[data['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passengers with family survival information:",
data.loc[data['Family_Survival']!=0.5].shape[0] ) | Titanic - Machine Learning from Disaster |
1,034,460 | df_submission = pd.DataFrame(index=df_test_id.index)
df_submission['winPlacePerc'] = y
df_submission.to_csv('linear_raw.csv', index_label='Id' )<load_from_csv> | for _, grp_df in data.groupby('Ticket'):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
if(row['Family_Survival'] == 0)|(row['Family_Survival']== 0.5):
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
data.loc[data['PassengerId'] == passID, 'Family_Survival'] = 1
elif(smin==0.0):
data.loc[data['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passenger with family/group survival information: "
+str(data[data['Family_Survival']!=0.5].shape[0])) | Titanic - Machine Learning from Disaster |
1,034,460 | df_test = pd.read_csv('../input/test_V2.csv')
df_test.shape<merge> | data = data.reset_index(drop=True)
data = data.drop('Survived', axis=1)
data.tail() | Titanic - Machine Learning from Disaster |
1,034,460 | df_submission = df_submission.merge(df_test[['Id', 'matchId', 'groupId', 'maxPlace', 'numGroups']], on='Id', how='left')
df_submission.head()<merge> | data['Fare'] = pd.qcut(data['Fare'], 4)
lbl = LabelEncoder()
data['Fare'] = lbl.fit_transform(data['Fare'] ) | Titanic - Machine Learning from Disaster |
1,034,460 | df_submission_group = df_submission.groupby(['matchId', 'groupId'] ).first().reset_index()
df_submission_group['rank'] = df_submission_group.groupby(['matchId'])['winPlacePerc'].rank()
df_submission_group = df_submission_group.merge(df_submission_group.groupby('matchId')['rank'].max().to_frame('max_rank' ).reset_index() , on='matchId', how='left')
df_submission_group['adjusted_perc'] =(df_submission_group['rank'] - 1)/(df_submission_group['numGroups'] - 1)
df_submission = df_submission.merge(df_submission_group[['adjusted_perc', 'matchId', 'groupId']], on=['matchId', 'groupId'], how='left')
df_submission['winPlacePerc'] = df_submission['adjusted_perc']
df_submission.head()<feature_engineering> | titles_data = sorted(set([x for x in data['Name'].map(lambda x: get_title(x)) ])) | Titanic - Machine Learning from Disaster |
1,034,460 | df_submission.loc[df_submission.maxPlace == 0, 'winPlacePerc'] = 0
df_submission.loc[df_submission.maxPlace == 1, 'winPlacePerc'] = 1<feature_engineering> | data['Title'] = data['Name'].map(lambda x: get_title(x))
data['Title'] = data.apply(set_title, axis=1 ) | Titanic - Machine Learning from Disaster |
1,034,460 | t = df_submission.loc[df_submission.maxPlace > 1]
gap = 1.0 /(t.maxPlace.values - 1)
fixed_perc = np.around(t.winPlacePerc.values / gap)* gap
df_submission.loc[df_submission.maxPlace > 1, 'winPlacePerc'] = fixed_perc<feature_engineering> | print(data['Title'].value_counts() ) | Titanic - Machine Learning from Disaster |
1,034,460 | df_submission.loc[(df_submission.maxPlace > 1)&(df_submission.numGroups == 1), 'winPlacePerc'] = 0
assert df_submission['winPlacePerc'].isnull().sum() == 0<save_to_csv> | print('Total missing age data: ', pd.isnull(data['Age'] ).sum() ) | Titanic - Machine Learning from Disaster |
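The snapping step above exploits the fact that only maxPlace placements are feasible, so valid `winPlacePerc` values are multiples of gap = 1/(maxPlace − 1). A tiny worked example with assumed numbers:

```python
import numpy as np

maxPlace = 10
gap = 1.0 / (maxPlace - 1)           # 10 placements -> step of 1/9
pred = np.array([0.47, 0.98, 0.02])
snapped = np.around(pred / gap) * gap
print(snapped)                       # [0.444..., 1.0, 0.0]
```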
1,034,460 | df_submission[['Id', 'winPlacePerc']].to_csv('linear_adjusted.csv', index=False )<set_options> | data['Age'] = data.groupby('Title')['Age'].apply(lambda x: x.fillna(x.median())) | Titanic - Machine Learning from Disaster |
1,034,460 | debug = False
debug_rows = 10000
gc.enable()
pd.options.display.float_format = '{:,.2f}'.format
pd.set_option('display.max_columns', 100)
%matplotlib inline
<load_from_csv> | data['Age'] = pd.qcut(data['Age'], 4)
lbl = LabelEncoder()
data['Age'] = lbl.fit_transform(data['Age'] ) | Titanic - Machine Learning from Disaster |
1,034,460 | if(debug):
train = pd.read_csv('../input/train_V2.csv', nrows = debug_rows)
test = pd.read_csv('../input/test_V2.csv')
else:
train = pd.read_csv('../input/train_V2.csv')
test = pd.read_csv('../input/test_V2.csv')<count_missing_values> | data['Sex'] = data['Sex'].replace(['male', 'female'], [0, 1]) | Titanic - Machine Learning from Disaster |
1,034,460 | train.isnull().sum()<count_missing_values> | data['Sex'] = data['Sex'].replace(['male', 'female'], [0, 1] ) | Titanic - Machine Learning from Disaster |
1,034,460 | train.isnull().sum()<count_missing_values> | data['Embarked'] = data['Embarked'].fillna(data['Embarked'].mode() [0])
data['Embarked'] = data['Embarked'].replace(['S', 'C', 'Q'], [0, 1, 2] ) | Titanic - Machine Learning from Disaster |
1,034,460 | train.dropna(axis=0, how='all')
print(train.isnull().any().any() )<count_values> | data['Cabin'] = data['Cabin'].map(lambda x: x[0] ) | Titanic - Machine Learning from Disaster |
1,034,460 | assists[assists>10].count()<count_values> | data['Cabin'].value_counts() | Titanic - Machine Learning from Disaster |
1,034,460 | eda[eda>15].count()<count_values> | def unknown_cabin(cabin):
if cabin != 'U':
return 1
else:
return 0
data['Cabin'] = data['Cabin'].apply(lambda x:unknown_cabin(x)) | Titanic - Machine Learning from Disaster |
1,034,460 | eda[eda>10].count()<filter> | data['Family Size'] = data['SibSp'] + data['Parch'] | Titanic - Machine Learning from Disaster |
1,034,460 | train[train['walkDistance']>10000]<filter> | data = data.drop(['Name', 'Parch', 'SibSp', 'Ticket', 'Last_Name', 'PassengerId'], axis = 1 ) | Titanic - Machine Learning from Disaster |
1,034,460 | train[train['headshotKills']>10]<filter> | train = data[:ntrain]
test = data[ntrain:] | Titanic - Machine Learning from Disaster |
1,034,460 | train[(train['rideDistance']==0)&(train['roadKills']>0)]<filter> | X_test = test
X_train = train
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test ) | Titanic - Machine Learning from Disaster |
1,034,460 | train[train['teamKills']>3]<drop_column> | ran = RandomForestClassifier(random_state=1)
knn = KNeighborsClassifier()
log = LogisticRegression()
xgb = XGBClassifier()
gbc = GradientBoostingClassifier()
svc = SVC(probability=True)
ext = ExtraTreesClassifier()
ada = AdaBoostClassifier()
gnb = GaussianNB()
gpc = GaussianProcessClassifier()
bag = BaggingClassifier()
models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag]
scores = []
for mod in models:
mod.fit(X_train, y_train)
acc = cross_val_score(mod, X_train, y_train, scoring = "accuracy", cv = 10)
scores.append(acc.mean() ) | Titanic - Machine Learning from Disaster |
1,034,460 | df = train
df = df.drop(df[(df['walkDistance']<10.0)&(df['damageDealt']>0)].index)
df = df.drop(df[(df['walkDistance']<10.0)&(df['kills']>10)].index)
df = df.drop(df[(df['walkDistance']<100.0)&(df['weaponsAcquired']>30)].index)
df = df.drop(df[(df['walkDistance']<10.0)&(df['heals']>100)].index)
df = df.drop(df[(df['walkDistance']<10.0)&(df['headshotKills']>5)].index)
df = df.drop(df[(df['walkDistance']<10.0)&(df['headshotKills']>5)].index)
df['unrated_kill'] = 0
df.loc[df['killPoints']==0, 'unrated_kill']=1
df['poor_kills'] = 0
df.loc[(df['killPoints']>10)&(df['killPoints']<800), 'poor_kills'] = 1
df.loc[(df['killPoints']>10)&(df['killPoints']<800), 'killPoints'] = 0
df['killPerDamage'] = df['kills']/df['damageDealt']
df = df.fillna(0)
df = df.drop(df[df['killStreaks']>=10].index)
df['unrated_rank'] = 0
df.loc[df['rankPoints']==-1, 'unrated_rank']=1
df = df.drop(df[(df['rideDistance']==0.0)&(df['roadKills']>0)].index)
df = df.drop(df[(df['weaponsAcquired']>30)&(df['walkDistance']<100)].index)
df['unrated_win'] = 0
df.loc[(df['winPoints']==-1)|(df['winPoints'] == 0), 'unrated_win']=1
df['poor_wins'] = 0
df.loc[(df['winPoints']>250)&(df['winPoints']<1200), 'poor_wins'] = 1
df.loc[(df['winPoints']>250)&(df['winPoints']<1200), 'killPoints'] = 0
print('removed:' + str(train['Id'].count() - df['Id'].count()))
df.head()
<load_from_csv> | results = pd.DataFrame({
'Model': ['Random Forest', 'K Nearest Neighbour', 'Logistic Regression', 'XGBoost', 'Gradient Boosting', 'SVC', 'Extra Trees', 'AdaBoost', 'Gaussian Naive Bayes', 'Gaussian Process', 'Bagging Classifier'],
'Score': scores})
result_df = results.sort_values(by='Score', ascending=False ).reset_index(drop=True)
result_df.head(11 ) | Titanic - Machine Learning from Disaster |
1,034,460 | def feature_engineering(is_train=True,debug=True):
test_idx = None
if is_train:
print("processing train.csv")
if debug == True:
df = pd.read_csv('../input/train_V2.csv', nrows=1000000)
else:
df = pd.read_csv('../input/train_V2.csv')
df = df[df['maxPlace'] > 1]
else:
print("processing test.csv")
if debug == True:
df = pd.read_csv('../input/test_V2.csv')
else:
df = pd.read_csv('../input/test_V2.csv')
test_idx = df.Id
print("remove some columns")
target = 'winPlacePerc'
if(is_train):
print("removing cheaters")
df = df.drop(df[(df['walkDistance']<10.0)&(df['damageDealt']>0)].index)
df = df.drop(df[(df['walkDistance']<10.0)&(df['kills']>10)].index)
df = df.drop(df[(df['walkDistance']<100.0)&(df['weaponsAcquired']>30)].index)
df = df.drop(df[(df['walkDistance']<10.0)&(df['heals']>100)].index)
df = df.drop(df[(df['walkDistance']<10.0)&(df['headshotKills']>5)].index)
df = df.drop(df[(df['walkDistance']<10.0)&(df['headshotKills']>5)].index)
df = df.drop(df[df['killStreaks']>=10].index)
df = df.drop(df[(df['rideDistance']==0.0)&(df['roadKills']>0)].index)
df = df.drop(df[(df['weaponsAcquired']>30)&(df['walkDistance']<100)].index)
df['unrated_kill'] = 0
df.loc[df['killPoints']==0, 'unrated_kill']=1
df['poor_kills'] = 0
df.loc[(df['killPoints']>10)&(df['killPoints']<800), 'poor_kills'] = 1
df.loc[(df['killPoints']>10)&(df['killPoints']<800), 'killPoints'] = 0
df['unrated_rank'] = 0
df.loc[df['rankPoints']==-1, 'unrated_rank']=1
df['unrated_win'] = 0
df.loc[(df['winPoints']==-1)|(df['winPoints'] == 0), 'unrated_win']=1
df['poor_wins'] = 0
df.loc[(df['winPoints']>250)&(df['winPoints']<1200), 'poor_wins'] = 1
df.loc[(df['winPoints']>250)&(df['winPoints']<1200), 'killPoints'] = 0
df[df == np.Inf] = np.NaN
df[df == np.NINF] = np.NaN
print("Removing Na's From DF")
df.fillna(0, inplace=True)
features = list(df.columns)
features.remove("Id")
features.remove("matchId")
features.remove("groupId")
features.remove("matchType")
y = None
if is_train:
print("get target")
y = np.array(df.groupby(['matchId','groupId'])[target].agg('mean'), dtype=np.float64)
features.remove(target)
print("get group mean feature")
agg = df.groupby(['matchId','groupId'])[features].agg('mean')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
if is_train: df_out = agg.reset_index() [['matchId','groupId']]
else: df_out = df[['matchId','groupId']]
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_mean", "_mean_rank"], how='left', on=['matchId', 'groupId'])
print("get group max feature")
agg = df.groupby(['matchId','groupId'])[features].agg('max')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_max", "_max_rank"], how='left', on=['matchId', 'groupId'])
print("get group min feature")
agg = df.groupby(['matchId','groupId'])[features].agg('min')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_min", "_min_rank"], how='left', on=['matchId', 'groupId'])
print("get group size feature")
agg = df.groupby(['matchId','groupId'] ).size().reset_index(name='group_size')
df_out = df_out.merge(agg, how='left', on=['matchId', 'groupId'])
print("get match mean feature")
agg = df.groupby(['matchId'])[features].agg('mean' ).reset_index()
df_out = df_out.merge(agg, suffixes=["", "_match_mean"], how='left', on=['matchId'])
print("get match size feature")
agg = df.groupby(['matchId'] ).size().reset_index(name='match_size')
df_out = df_out.merge(agg, how='left', on=['matchId'])
print("Adding Features")
df['headshotrate'] = df['kills']/df['headshotKills']
df['killStreakrate'] = df['killStreaks']/df['kills']
df['healthitems'] = df['heals'] + df['boosts']
df['totalDistance'] = df['rideDistance'] + df["walkDistance"] + df["swimDistance"]
df['killPlace_over_maxPlace'] = df['killPlace'] / df['maxPlace']
df['headshotKills_over_kills'] = df['headshotKills'] / df['kills']
df['distance_over_weapons'] = df['totalDistance'] / df['weaponsAcquired']
df['walkDistance_over_heals'] = df['walkDistance'] / df['heals']
df['walkDistance_over_kills'] = df['walkDistance'] / df['kills']
df['killsPerWalkDistance'] = df['kills'] / df['walkDistance']
df["skill"] = df["headshotKills"] + df["roadKills"]
df[df == np.Inf] = np.NaN
df[df == np.NINF] = np.NaN
print("Removing Na's From DF")
df.fillna(0, inplace=True)
df_out.drop(["matchId", "groupId"], axis=1, inplace=True)
X = df_out
feature_names = list(df_out.columns)
del df, df_out, agg, agg_rank
gc.collect()
return X, y, feature_names, test_idx<prepare_x_and_y> | fi = {'Features':train.columns.tolist() , 'Importance':xgb.feature_importances_}
importance = pd.DataFrame(fi, index=None ).sort_values('Importance', ascending=False ) | Titanic - Machine Learning from Disaster |
1,034,460 | x_train, y_train, train_columns, _ = feature_engineering(True, debug=debug)
x_test, _, _ , test_idx = feature_engineering(False, debug=debug)
<train_model> | fi = {'Features':train.columns.tolist() , 'Importance':np.transpose(log.coef_[0])}
importance = pd.DataFrame(fi, index=None ).sort_values('Importance', ascending=False ) | Titanic - Machine Learning from Disaster |
1,034,460 | gc.collect() ;
train_index = round(int(x_train.shape[0]*0.8))
dev_X = x_train[:train_index]
val_X = x_train[train_index:]
dev_y = y_train[:train_index]
val_y = y_train[train_index:]
gc.collect() ;
def run_lgb(train_X, train_y, val_X, val_y, x_test):
params = {"objective" : "regression", "metric" : "mae", 'n_estimators':20000, 'early_stopping_rounds':200,
"num_leaves" : 31, "learning_rate" : 0.05, "bagging_fraction" : 0.7,
"bagging_seed" : 0, "num_threads" : 4,"colsample_bytree" : 0.7
}
lgtrain = lgb.Dataset(train_X, label=train_y)
lgval = lgb.Dataset(val_X, label=val_y)
model = lgb.train(params, lgtrain, valid_sets=[lgtrain, lgval], early_stopping_rounds=200, verbose_eval=1000)
pred_test_y = model.predict(x_test, num_iteration=model.best_iteration)
return pred_test_y, model
pred_test, model = run_lgb(dev_X, dev_y, val_X, val_y, x_test)
pred_test<load_from_csv> | gbc_imp = pd.DataFrame({'Feature':train.columns, 'gbc importance':gbc.feature_importances_})
xgb_imp = pd.DataFrame({'Feature':train.columns, 'xgb importance':xgb.feature_importances_})
ran_imp = pd.DataFrame({'Feature':train.columns, 'ran importance':ran.feature_importances_})
ext_imp = pd.DataFrame({'Feature':train.columns, 'ext importance':ext.feature_importances_})
ada_imp = pd.DataFrame({'Feature':train.columns, 'ada importance':ada.feature_importances_})
importances = gbc_imp.merge(xgb_imp, on='Feature' ).merge(ran_imp, on='Feature' ).merge(ext_imp, on='Feature' ).merge(ada_imp, on='Feature')
importances['Average'] = importances.mean(axis=1)
importances = importances.sort_values(by='Average', ascending=False ).reset_index(drop=True)
importances | Titanic - Machine Learning from Disaster |
1,034,460 | print(pred_test.shape[0])
pred_test
df_sub = pd.read_csv(".. /input/sample_submission_V2.csv")
df_sub['winPlacePerc'] = pred_test
df_sub.head()
<load_from_csv> | fi = {'Features':importances['Feature'], 'Importance':importances['Average']}
importance = pd.DataFrame(fi, index=None ).sort_values('Importance', ascending=False ) | Titanic - Machine Learning from Disaster |
1,034,460 | if(debug):
df_sub = pd.read_csv(".. /input/sample_submission_V2.csv", nrows=pred_test.shape[0])
df_test = pd.read_csv(".. /input/test_V2.csv", nrows=pred_test.shape[0])
else:
df_sub = pd.read_csv(".. /input/sample_submission_V2.csv")
df_test = pd.read_csv(".. /input/test_V2.csv")
df_sub['winPlacePerc'] = pred_test
df_sub = df_sub.merge(df_test[["Id", "matchId", "groupId", "maxPlace", "numGroups"]], on="Id", how="left")
df_sub_group = df_sub.groupby(["matchId", "groupId"] ).first().reset_index()
df_sub_group["rank"] = df_sub_group.groupby(["matchId"])["winPlacePerc"].rank()
df_sub_group = df_sub_group.merge(
df_sub_group.groupby("matchId")["rank"].max().to_frame("max_rank" ).reset_index() ,
on="matchId", how="left")
df_sub_group["adjusted_perc"] =(df_sub_group["rank"] - 1)/(df_sub_group["numGroups"] - 1)
df_sub = df_sub.merge(df_sub_group[["adjusted_perc", "matchId", "groupId"]], on=["matchId", "groupId"], how="left")
df_sub["winPlacePerc"] = df_sub["adjusted_perc"]
df_sub.loc[df_sub.maxPlace == 0, "winPlacePerc"] = 0
df_sub.loc[df_sub.maxPlace == 1, "winPlacePerc"] = 1
subset = df_sub.loc[df_sub.maxPlace > 1]
gap = 1.0 /(subset.maxPlace.values - 1)
new_perc = np.around(subset.winPlacePerc.values / gap)* gap
df_sub.loc[df_sub.maxPlace > 1, "winPlacePerc"] = new_perc
df_sub.loc[(df_sub.maxPlace > 1)&(df_sub.numGroups == 1), "winPlacePerc"] = 0
assert df_sub["winPlacePerc"].isnull().sum() == 0
df_sub[["Id", "winPlacePerc"]].to_csv("submission_adjusted.csv", index=False)
df_sub<drop_column> | train = train.drop(['Embarked', 'Cabin'], axis=1)
test = test.drop(['Embarked', 'Cabin'], axis=1)
X_train = train
X_test = test
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test ) | Titanic - Machine Learning from Disaster |
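The adjustment block above discards the raw regression values and keeps only their ordering: groups within a match are re-spaced evenly via adjusted_perc = (rank − 1)/(numGroups − 1). A toy example with made-up predictions:

```python
import pandas as pd

sub = pd.DataFrame({'matchId': [1, 1, 1], 'groupId': [1, 2, 3],
                    'numGroups': [3, 3, 3],
                    'winPlacePerc': [0.20, 0.90, 0.55]})
sub['rank'] = sub.groupby('matchId')['winPlacePerc'].rank()
sub['adjusted_perc'] = (sub['rank'] - 1) / (sub['numGroups'] - 1)
print(sub[['groupId', 'adjusted_perc']])  # 0.0, 1.0, 0.5 -> evenly spaced
```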
1,034,460 |
<prepare_x_and_y> | ran = RandomForestClassifier(random_state=1)
knn = KNeighborsClassifier()
log = LogisticRegression()
xgb = XGBClassifier(random_state=1)
gbc = GradientBoostingClassifier(random_state=1)
svc = SVC(probability=True)
ext = ExtraTreesClassifier(random_state=1)
ada = AdaBoostClassifier(random_state=1)
gnb = GaussianNB()
gpc = GaussianProcessClassifier()
bag = BaggingClassifier(random_state=1)
models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag]
scores_v2 = []
for mod in models:
mod.fit(X_train, y_train)
acc = cross_val_score(mod, X_train, y_train, scoring = "accuracy", cv = 10)
scores_v2.append(acc.mean() ) | Titanic - Machine Learning from Disaster |
1,034,460 |
<compute_test_metric> | results = pd.DataFrame({
'Model': ['Random Forest', 'K Nearest Neighbour', 'Logistic Regression', 'XGBoost', 'Gradient Boosting', 'SVC', 'Extra Trees', 'AdaBoost', 'Gaussian Naive Bayes', 'Gaussian Process', 'Bagging Classifier'],
'Original Score': scores,
'Score with feature selection': scores_v2})
result_df = results.sort_values(by='Score with feature selection', ascending=False ).reset_index(drop=True)
result_df.head(11 ) | Titanic - Machine Learning from Disaster |
1,034,460 |
<load_from_csv> | Cs = [0.001, 0.01, 0.1, 1, 5, 10, 15, 20, 50, 100]
gammas = [0.001, 0.01, 0.1, 1]
hyperparams = {'C': Cs, 'gamma' : gammas}
gd=GridSearchCV(estimator = SVC(probability=True), param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 |
<feature_engineering> | learning_rate = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2]
n_estimators = [100, 250, 500, 750, 1000, 1250, 1500]
hyperparams = {'learning_rate': learning_rate, 'n_estimators': n_estimators}
gd=GridSearchCV(estimator = GradientBoostingClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 |
<load_from_csv> | penalty = ['l1', 'l2']
C = np.logspace(0, 4, 10)
hyperparams = {'penalty': penalty, 'C': C}
gd=GridSearchCV(estimator = LogisticRegression() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 | warnings.filterwarnings('ignore')
%matplotlib inline
py.init_notebook_mode(connected=True)
debug = False
if debug == True:
df_train = pd.read_csv('../input/train_V2.csv', nrows=10000)
df_test = pd.read_csv('../input/test_V2.csv')
else:
df_train = pd.read_csv('../input/train_V2.csv')
df_test = pd.read_csv('../input/test_V2.csv')<filter> | learning_rate = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2]
n_estimators = [10, 25, 50, 75, 100, 250, 500, 750, 1000]
hyperparams = {'learning_rate': learning_rate, 'n_estimators': n_estimators}
gd=GridSearchCV(estimator = XGBClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 | df_train[df_train['groupId']=='4d4b580de459be']<feature_engineering> | max_depth = [3, 4, 5, 6, 7, 8, 9, 10]
min_child_weight = [1, 2, 3, 4, 5, 6]
hyperparams = {'max_depth': max_depth, 'min_child_weight': min_child_weight}
gd=GridSearchCV(estimator = XGBClassifier(learning_rate=0.0001, n_estimators=10), param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 | df_train = df_train[df_train['Id']!='f70c74418bb064']
headshot = df_train[['kills','winPlacePerc','headshotKills']]
headshot['headshotrate'] = headshot['kills'] / headshot['headshotKills']<feature_engineering> | gamma = [i*0.1 for i in range(0,5)]
hyperparams = {'gamma': gamma}
gd=GridSearchCV(estimator = XGBClassifier(learning_rate=0.0001, n_estimators=10, max_depth=3,
min_child_weight=1), param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 | del headshot
df_train['headshotrate'] = df_train['kills']/df_train['headshotKills']
df_test['headshotrate'] = df_test['kills']/df_test['headshotKills']<feature_engineering> | subsample = [0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1]
colsample_bytree = [0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1]
hyperparams = {'subsample': subsample, 'colsample_bytree': colsample_bytree}
gd=GridSearchCV(estimator = XGBClassifier(learning_rate=0.0001, n_estimators=10, max_depth=3,
min_child_weight=1, gamma=0), param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 | killStreak = df_train[['kills','winPlacePerc','killStreaks']]
killStreak['killStreakrate'] = killStreak['killStreaks']/killStreak['kills']
killStreak.corr()<drop_column> | reg_alpha = [1e-5, 1e-2, 0.1, 1, 100]
hyperparams = {'reg_alpha': reg_alpha}
gd=GridSearchCV(estimator = XGBClassifier(learning_rate=0.0001, n_estimators=10, max_depth=3,
min_child_weight=1, gamma=0, subsample=0.6, colsample_bytree=0.9),
param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 | del healthitems<feature_engineering> | n_restarts_optimizer = [0, 1, 2, 3]
max_iter_predict = [1, 2, 5, 10, 20, 35, 50, 100]
warm_start = [True, False]
hyperparams = {'n_restarts_optimizer': n_restarts_optimizer, 'max_iter_predict': max_iter_predict, 'warm_start': warm_start}
gd=GridSearchCV(estimator = GaussianProcessClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 | kills = df_train[['assists','winPlacePerc','kills']]
kills['kills_assists'] =(kills['kills'] + kills['assists'] )<set_options> | n_estimators = [10, 25, 50, 75, 100, 125, 150, 200]
learning_rate = [0.001, 0.01, 0.1, 0.5, 1, 1.5, 2]
hyperparams = {'n_estimators': n_estimators, 'learning_rate': learning_rate}
gd=GridSearchCV(estimator = AdaBoostClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 | del df_train,df_test;
gc.collect()<load_from_csv> | n_neighbors = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20]
algorithm = ['auto']
weights = ['uniform', 'distance']
leaf_size = [1, 2, 3, 4, 5, 10, 15, 20, 25, 30]
hyperparams = {'algorithm': algorithm, 'weights': weights, 'leaf_size': leaf_size,
'n_neighbors': n_neighbors}
gd=GridSearchCV(estimator = KNeighborsClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 | def feature_engineering(is_train=True,debug=True):
test_idx = None
if is_train:
print("processing train.csv")
if debug == True:
df = pd.read_csv('../input/train_V2.csv', nrows=10000)
else:
df = pd.read_csv('../input/train_V2.csv')
df = df[df['maxPlace'] > 1]
else:
print("processing test.csv")
df = pd.read_csv('../input/test_V2.csv')
test_idx = df.Id
print("remove some columns")
target = 'winPlacePerc'
features = list(df.columns)
features.remove("Id")
features.remove("matchId")
features.remove("groupId")
features.remove("matchType")
y = None
if is_train:
print("get target")
y = np.array(df.groupby(['matchId','groupId'])[target].agg('mean'), dtype=np.float64)
features.remove(target)
print("get group mean feature")
agg = df.groupby(['matchId','groupId'])[features].agg('mean')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
if is_train: df_out = agg.reset_index() [['matchId','groupId']]
else: df_out = df[['matchId','groupId']]
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_mean", "_mean_rank"], how='left', on=['matchId', 'groupId'])
print("get group max feature")
agg = df.groupby(['matchId','groupId'])[features].agg('max')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_max", "_max_rank"], how='left', on=['matchId', 'groupId'])
print("get group min feature")
agg = df.groupby(['matchId','groupId'])[features].agg('min')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_min", "_min_rank"], how='left', on=['matchId', 'groupId'])
print("get group size feature")
agg = df.groupby(['matchId','groupId'] ).size().reset_index(name='group_size')
df_out = df_out.merge(agg, how='left', on=['matchId', 'groupId'])
print("get match mean feature")
agg = df.groupby(['matchId'])[features].agg('mean' ).reset_index()
df_out = df_out.merge(agg, suffixes=["", "_match_mean"], how='left', on=['matchId'])
print("get match size feature")
agg = df.groupby(['matchId'] ).size().reset_index(name='match_size')
df_out = df_out.merge(agg, how='left', on=['matchId'])
df_out.drop(["matchId", "groupId"], axis=1, inplace=True)
X = df_out
feature_names = list(df_out.columns)
del df, df_out, agg, agg_rank
gc.collect()
return X, y, feature_names, test_idx
x_train, y_train, train_columns, _ = feature_engineering(True,False)
x_test, _, _ , test_idx = feature_engineering(False,True )<feature_engineering> | n_estimators = [10, 25, 50, 75, 100]
max_depth = [3, None]
max_features = [1, 3, 5, 7]
min_samples_split = [2, 4, 6, 8, 10]
min_samples_leaf = [2, 4, 6, 8, 10]
hyperparams = {'n_estimators': n_estimators, 'max_depth': max_depth, 'max_features': max_features,
'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf}
gd=GridSearchCV(estimator = RandomForestClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 | x_train['headshotrate'] = x_train['kills']/x_train['headshotKills']
x_test['headshotrate'] = x_test['kills']/x_test['headshotKills']
x_train['killStreakrate'] = x_train['killStreaks']/x_train['kills']
x_test['killStreakrate'] = x_test['killStreaks']/x_test['kills']
x_train['healthitems'] = x_train['heals'] + x_train['boosts']
x_test['healthitems'] = x_test['heals'] + x_test['boosts']
del x_train['heals'];del x_test['heals']
train_columns.append('headshotrate')
train_columns.append('killStreakrate')
train_columns.append('healthitems')
train_columns.remove('heals' )<drop_column> | n_estimators = [10, 25, 50, 75, 100]
max_depth = [3, None]
max_features = [1, 3, 5, 7]
min_samples_split = [2, 4, 6, 8, 10]
min_samples_leaf = [2, 4, 6, 8, 10]
hyperparams = {'n_estimators': n_estimators, 'max_depth': max_depth, 'max_features': max_features,
'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf}
gd=GridSearchCV(estimator = ExtraTreesClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 | x_train = reduce_mem_usage(x_train)
x_test = reduce_mem_usage(x_test )<set_options> | n_estimators = [10, 15, 20, 25, 50, 75, 100, 150]
max_samples = [1, 2, 3, 5, 7, 10, 15, 20, 25, 30, 50]
max_features = [1, 3, 5, 7]
hyperparams = {'n_estimators': n_estimators, 'max_samples': max_samples, 'max_features': max_features}
gd=GridSearchCV(estimator = BaggingClassifier() , param_grid = hyperparams,
verbose=True, cv=5, scoring = "accuracy")
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
1,034,460 | warnings.filterwarnings("ignore")
<split> | ran = RandomForestClassifier(n_estimators=25,
max_depth=3,
max_features=3,
min_samples_leaf=2,
min_samples_split=8,
random_state=1)
knn = KNeighborsClassifier(algorithm='auto',
leaf_size=1,
n_neighbors=5,
weights='uniform')
log = LogisticRegression(C=2.7825594022071245,
penalty='l2')
xgb = XGBClassifier(learning_rate=0.0001,
n_estimators=10,
random_state=1)
gbc = GradientBoostingClassifier(learning_rate=0.0005,
n_estimators=1250,
random_state=1)
svc = SVC(probability=True)
ext = ExtraTreesClassifier(max_depth=None,
max_features=3,
min_samples_leaf=2,
min_samples_split=8,
n_estimators=10,
random_state=1)
ada = AdaBoostClassifier(learning_rate=0.1,
n_estimators=50,
random_state=1)
gpc = GaussianProcessClassifier()
bag = BaggingClassifier(random_state=1)
models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag]
scores_v3 = []
for mod in models:
mod.fit(X_train, y_train)
acc = cross_val_score(mod, X_train, y_train, scoring = "accuracy", cv = 10)
scores_v3.append(acc.mean() ) | Titanic - Machine Learning from Disaster |
1,034,460 | folds = KFold(n_splits=3,random_state=6)
oof_preds = np.zeros(x_train.shape[0])
sub_preds = np.zeros(x_test.shape[0])
start = time.time()
valid_score = 0
feature_importance_df = pd.DataFrame()
for n_fold,(trn_idx, val_idx)in enumerate(folds.split(x_train, y_train)) :
trn_x, trn_y = x_train.iloc[trn_idx], y_train[trn_idx]
val_x, val_y = x_train.iloc[val_idx], y_train[val_idx]
train_data = lgb.Dataset(data=trn_x, label=trn_y)
valid_data = lgb.Dataset(data=val_x, label=val_y)
params = {"objective" : "regression", "metric" : "mae", 'n_estimators':15000, 'early_stopping_rounds':100,
"num_leaves" : 31, "learning_rate" : 0.05, "bagging_fraction" : 0.9,
"bagging_seed" : 0, "num_threads" : 4,"colsample_bytree" : 0.7
}
lgb_model = lgb.train(params, train_data, valid_sets=[train_data, valid_data], verbose_eval=1000)
oof_preds[val_idx] = lgb_model.predict(val_x, num_iteration=lgb_model.best_iteration)
oof_preds[oof_preds>1] = 1
oof_preds[oof_preds<0] = 0
sub_pred = lgb_model.predict(x_test, num_iteration=lgb_model.best_iteration)
sub_pred[sub_pred>1] = 1
sub_pred[sub_pred<0] = 0
sub_preds += sub_pred/ folds.n_splits
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = train_columns
fold_importance_df["importance"] = lgb_model.feature_importance()
fold_importance_df["fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
gc.collect()
end = time.time()
print("Take Time :",(end-start))<load_from_csv> | results = pd.DataFrame({
'Model': ['Random Forest', 'K Nearest Neighbour', 'Logistic Regression', 'XGBoost', 'Gradient Boosting', 'SVC', 'Extra Trees', 'AdaBoost', 'Gaussian Naive Bayes', 'Gaussian Process', 'Bagging Classifier'],
'Original Score': scores,
'Score with feature selection': scores_v2,
'Score with tuned parameters': scores_v3})
result_df = results.sort_values(by='Score with tuned parameters', ascending=False ).reset_index(drop=True)
result_df.head(11 ) | Titanic - Machine Learning from Disaster |
1,034,460 | df_test = pd.read_csv('../input/' + 'test_V2.csv')
pred = sub_preds
print("fix winPlacePerc")
for i in range(len(df_test)) :
winPlacePerc = pred[i]
maxPlace = int(df_test.iloc[i]['maxPlace'])
if maxPlace == 0:
winPlacePerc = 0.0
elif maxPlace == 1:
winPlacePerc = 1.0
else:
gap = 1.0 /(maxPlace - 1)
winPlacePerc = round(winPlacePerc / gap)* gap
if winPlacePerc < 0: winPlacePerc = 0.0
if winPlacePerc > 1: winPlacePerc = 1.0
pred[i] = winPlacePerc
if(i + 1)% 100000 == 0:
print(i, flush=True, end=" ")
df_test['winPlacePerc'] = pred
submission = df_test[['Id', 'winPlacePerc']]
submission.to_csv('submission.csv', index=False )<import_modules> | grid_hard = VotingClassifier(estimators = [('Random Forest', ran),
('Logistic Regression', log),
('XGBoost', xgb),
('Gradient Boosting', gbc),
('Extra Trees', ext),
('AdaBoost', ada),
('Gaussian Process', gpc),
('SVC', svc),
('K Nearest Neighbour', knn),
('Bagging Classifier', bag)], voting = 'hard')
grid_hard_cv = model_selection.cross_validate(grid_hard, X_train, y_train, cv = 10)
grid_hard.fit(X_train, y_train)
print("Hard voting on train set score mean: {:.2f}".format(grid_hard_cv['train_score'].mean() *100))
print("Hard voting on test set score mean: {:.2f}".format(grid_hard_cv['test_score'].mean() *100)) | Titanic - Machine Learning from Disaster |
1,034,460 | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt<load_from_csv> | grid_soft = VotingClassifier(estimators = [('Random Forest', ran),
('Logistic Regression', log),
('XGBoost', xgb),
('Gradient Boosting', gbc),
('Extra Trees', ext),
('AdaBoost', ada),
('Gaussian Process', gpc),
('SVC', svc),
('K Nearest Neighbour', knn),
('Bagging Classifier', bag)], voting = 'soft')
grid_soft_cv = model_selection.cross_validate(grid_soft, X_train, y_train, cv = 10)
grid_soft.fit(X_train, y_train)
print("Soft voting on train set score mean: {:.2f}".format(grid_soft_cv['train_score'].mean() *100))
print("Soft voting on test set score mean: {:.2f}".format(grid_soft_cv['test_score'].mean() *100)) | Titanic - Machine Learning from Disaster |
1,034,460 | develop_mode = False
if develop_mode:
df_train = reduce_mem_usage(pd.read_csv('../input/train_V2.csv', nrows=5000))
df_test = reduce_mem_usage(pd.read_csv('../input/test_V2.csv'))
else:
df_train = reduce_mem_usage(pd.read_csv('../input/train_V2.csv'))
df_test = reduce_mem_usage(pd.read_csv('../input/test_V2.csv'))<train_model> | predictions = grid_soft.predict(X_test)
submission = pd.concat([pd.DataFrame(passId), pd.DataFrame(predictions)], axis = 'columns')
submission.columns = ["PassengerId", "Survived"]
submission.to_csv('titanic_submission.csv', header = True, index = False ) | Titanic - Machine Learning from Disaster |
1,808,700 | print('The sizes of the datasets are:')
print('Training Dataset: ', df_train.shape)
print('Testing Dataset: ', df_test.shape )<sort_values> | warnings.filterwarnings('ignore')
| Titanic - Machine Learning from Disaster |
1,808,700 | group_tmp = df_train[df_train['matchId']=='df014fbee741c6']['groupId'].value_counts().sort_values(ascending=False )<set_options> | train = pd.read_csv(".. /input/train.csv")
train['label'] = 'train'
test = pd.read_csv(".. /input/test.csv")
test['label'] = 'test'
test_passengerId = test.PassengerId
df = train.append(test)
df.sample(2 ) | Titanic - Machine Learning from Disaster |
1,808,700 | warnings.filterwarnings('ignore' )<load_from_csv> | df.isnull().sum() | Titanic - Machine Learning from Disaster |
1,808,700 | def feature_engineering(is_train=True,debug=True):
test_idx = None
if is_train:
print("processing train.csv")
if debug == True:
df = pd.read_csv('../input/train_V2.csv', nrows=10000)
else:
df = pd.read_csv('../input/train_V2.csv')
df = df[df['maxPlace'] > 1]
else:
print("processing test.csv")
df = pd.read_csv('../input/test_V2.csv')
test_idx = df.Id
df = reduce_mem_usage(df)
print("remove some columns")
target = 'winPlacePerc'
features = list(df.columns)
features.remove("Id")
features.remove("matchId")
features.remove("groupId")
features.remove("matchType")
y = None
if is_train:
print("get target")
y = np.array(df.groupby(['matchId','groupId'])[target].agg('mean'), dtype=np.float64)
features.remove(target)
print("get group mean feature")
agg = df.groupby(['matchId','groupId'])[features].agg('mean')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
if is_train: df_out = agg.reset_index() [['matchId','groupId']]
else: df_out = df[['matchId','groupId']]
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_mean", "_mean_rank"], how='left', on=['matchId', 'groupId'])
print("get group max feature")
agg = df.groupby(['matchId','groupId'])[features].agg('max')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_max", "_max_rank"], how='left', on=['matchId', 'groupId'])
print("get group min feature")
agg = df.groupby(['matchId','groupId'])[features].agg('min')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_min", "_min_rank"], how='left', on=['matchId', 'groupId'])
print("get group size feature")
agg = df.groupby(['matchId','groupId'] ).size().reset_index(name='group_size')
df_out = df_out.merge(agg, how='left', on=['matchId', 'groupId'])
print("get match mean feature")
agg = df.groupby(['matchId'])[features].agg('mean' ).reset_index()
df_out = df_out.merge(agg, suffixes=["", "_match_mean"], how='left', on=['matchId'])
print("get match size feature")
agg = df.groupby(['matchId'] ).size().reset_index(name='match_size')
df_out = df_out.merge(agg, how='left', on=['matchId'])
df_out.drop(["matchId", "groupId"], axis=1, inplace=True)
X = df_out
feature_names = list(df_out.columns)
del df, df_out, agg, agg_rank
gc.collect()
return X, y, feature_names, test_idx<prepare_x_and_y> | df['Embarked'].fillna('S', inplace = True ) | Titanic - Machine Learning from Disaster |
1,808,700 | X_train, y_train, train_columns, _ = feature_engineering(True,False)
X_test, _, _ , test_idx = feature_engineering(False,True )<drop_column> | df[df.Fare.isnull() ] | Titanic - Machine Learning from Disaster |
1,808,700 | X_train =reduce_mem_usage(X_train)
X_test = reduce_mem_usage(X_test )<train_model> | print(df[df.Pclass == 1].Fare.quantile([0.25, 0.50, 0.75]))
print(df[df.Pclass == 2].Fare.quantile([0.25, 0.50, 0.75]))
print(df[df.Pclass == 3].Fare.quantile([0.25, 0.50, 0.75])) | Titanic - Machine Learning from Disaster |
1,808,700 | LR_model = LinearRegression(n_jobs=4, normalize=True)
LR_model.fit(X_train,y_train )<compute_test_metric> | df['Fare'].fillna(df[df.Pclass == 3].Fare.median() , inplace = True ) | Titanic - Machine Learning from Disaster |
1,808,700 | LR_model.score(X_train,y_train)<predict_on_test> | print("Age column has", df.Age.isnull().sum(), "missing values out of", len(df), ". Missing value percentage =", df.Age.isnull().sum()/len(df)*100) | Titanic - Machine Learning from Disaster |