Dataset preview (column summary): kernel_id — int64, values ~24.2k to ~23.3M; prompt — string, lengths 8 to 1.85M; completion — string, lengths 1 to 182k; comp_name — string, lengths 5 to 57.

[kernel_id 2,064,952 — comp_name: Titanic - Machine Learning from Disaster]
kappa_metrics = Metrics() history = model.fit_generator( data_generator, steps_per_epoch=x_train.shape[0] / BATCH_SIZE, epochs=15, validation_data=(x_val, y_val) )<predict_on_test>
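Note: `Metrics` is instantiated above but never defined in this excerpt. A minimal sketch of a quadratic-weighted-kappa callback consistent with that usage — assuming an older Keras that exposes `validation_data` on callbacks, and the same ordinal multi-hot targets the later cells decode — might be:

import numpy as np
from sklearn.metrics import cohen_kappa_score
from keras.callbacks import Callback

class Metrics(Callback):
    # Hypothetical reconstruction: track validation QWK after each epoch.
    def on_train_begin(self, logs=None):
        self.val_kappas = []

    def on_epoch_end(self, epoch, logs=None):
        X_val, y_val = self.validation_data[:2]
        y_pred = self.model.predict(X_val) > 0.5
        # decode ordinal multi-hot rows the same way the prediction cells below do
        kappa = cohen_kappa_score(y_val.sum(axis=1) - 1,
                                  y_pred.astype(int).sum(axis=1) - 1,
                                  weights='quadratic')
        self.val_kappas.append(kappa)
        print("val_kappa: {:.4f}".format(kappa))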
salutation = [i.split(",")[1].split(".")[0].strip() for i in combdata["Name"]] combdata["Title"] = pd.Series(salutation) combdata["Title"].value_counts()
y_test = model.predict(x_test) y_test<filter>
combdata['Title'] = combdata['Title'].replace('Mlle', 'Miss') combdata['Title'] = combdata['Title'].replace(['Mme','Lady','Ms'], 'Mrs') combdata.Title.loc[(combdata.Title != 'Master')&(combdata.Title != 'Mr')& (combdata.Title != 'Miss')&(combdata.Title != 'Mrs')] = 'Others' combdata["Title"].value_counts()
y_test = y_test > 0.37757874193797547 y_test<data_type_conversions>
combdata[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean()
y_test.astype(int ).sum(axis=1 )<data_type_conversions>
combdata = pd.get_dummies(combdata, columns = ["Title"] )
y_test.astype(int ).sum(axis=1)- 1<data_type_conversions>
combdata["Fare"].isnull().sum() combdata["Fare"] = combdata["Fare"].fillna(combdata["Fare"].median())
y_test = y_test.astype(int ).sum(axis=1)- 1 y_test<save_to_csv>
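The cells above decode ordinal predictions: each sigmoid output answers "is the grade at least k?", so thresholding, summing the columns, and subtracting one recovers the class index. A tiny worked example of the same arithmetic:

import numpy as np
# one sample, five thresholded outputs: three columns exceed the tuned cutoff
row = np.array([[True, True, True, False, False]])
print(row.astype(int).sum(axis=1) - 1)  # -> [2], i.e. predicted grade 2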
combdata['Fare-bin'] = pd.qcut(combdata.Fare,5,labels=[1,2,3,4,5] ).astype(int) combdata[['Fare-bin', 'Survived']].groupby(['Fare-bin'], as_index=False ).mean()
test_df['diagnosis'] = y_test test_df.to_csv('submission.csv',index=False )<set_options>
warnings.filterwarnings("ignore") gc.enable() print(os.listdir("../input")) <load_from_csv>
combdata_temp = combdata[['Age','Title_Master','Title_Miss','Title_Mr','Title_Mrs','Title_Others','Fare-bin','SibSp']] X = combdata_temp.dropna().drop('Age', axis=1) Y = combdata['Age'].dropna() holdout = combdata_temp.loc[np.isnan(combdata.Age)].drop('Age', axis=1) regressor = RandomForestRegressor(n_estimators = 300) regressor.fit(X, Y) y_pred = np.round(regressor.predict(holdout),1) combdata.Age.loc[combdata.Age.isnull() ] = y_pred combdata.Age.isnull().sum(axis=0 )
def feature_engineering(is_train=True, debug=True):
    if is_train:
        print("processing train.csv")
        if debug == True:
            df = pd.read_csv('../input/train_V2.csv', nrows=10000)
        else:
            df = pd.read_csv('../input/train_V2.csv')
        df = df[df['maxPlace'] > 1]
    else:
        print("processing test.csv")
        df = pd.read_csv('../input/test_V2.csv')
    print("remove some columns")
    target = 'winPlacePerc'
    print("Adding Features")
    df['headshotrate'] = df['kills']/df['headshotKills']
    df['killStreakrate'] = df['killStreaks']/df['kills']
    df['healthitems'] = df['heals'] + df['boosts']
    df['totalDistance'] = df['rideDistance'] + df["walkDistance"] + df["swimDistance"]
    df['killPlace_over_maxPlace'] = df['killPlace'] / df['maxPlace']
    df['headshotKills_over_kills'] = df['headshotKills'] / df['kills']
    df['distance_over_weapons'] = df['totalDistance'] / df['weaponsAcquired']
    df['walkDistance_over_heals'] = df['walkDistance'] / df['heals']
    df['walkDistance_over_kills'] = df['walkDistance'] / df['kills']
    df['killsPerWalkDistance'] = df['kills'] / df['walkDistance']
    df["skill"] = df["headshotKills"] + df["roadKills"]
    df[df == np.Inf] = np.NaN
    df[df == np.NINF] = np.NaN
    print("Removing Na's From DF")
    df.fillna(0, inplace=True)
    features = list(df.columns)
    features.remove("Id")
    features.remove("matchId")
    features.remove("groupId")
    features.remove("matchType")
    y = None
    if is_train:
        print("get target")
        y = df.groupby(['matchId','groupId'])[target].agg('mean')
        features.remove(target)
    print("get group mean feature")
    agg = df.groupby(['matchId','groupId'])[features].agg('mean')
    agg_rank = agg.groupby('matchId')[features].rank(pct=True).reset_index()
    if is_train:
        df_out = agg.reset_index()[['matchId','groupId']]
    else:
        df_out = df[['matchId','groupId']]
    df_out = df_out.merge(agg.reset_index(), suffixes=["", ""], how='left', on=['matchId', 'groupId'])
    df_out = df_out.merge(agg_rank, suffixes=["_mean", "_mean_rank"], how='left', on=['matchId', 'groupId'])
    print("get group max feature")
    agg = df.groupby(['matchId','groupId'])[features].agg('max')
    agg_rank = agg.groupby('matchId')[features].rank(pct=True).reset_index()
    df_out = df_out.merge(agg.reset_index(), suffixes=["", ""], how='left', on=['matchId', 'groupId'])
    df_out = df_out.merge(agg_rank, suffixes=["_max", "_max_rank"], how='left', on=['matchId', 'groupId'])
    print("get group min feature")
    agg = df.groupby(['matchId','groupId'])[features].agg('min')
    agg_rank = agg.groupby('matchId')[features].rank(pct=True).reset_index()
    df_out = df_out.merge(agg.reset_index(), suffixes=["", ""], how='left', on=['matchId', 'groupId'])
    df_out = df_out.merge(agg_rank, suffixes=["_min", "_min_rank"], how='left', on=['matchId', 'groupId'])
    print("get group size feature")
    agg = df.groupby(['matchId','groupId']).size().reset_index(name='group_size')
    df_out = df_out.merge(agg, how='left', on=['matchId', 'groupId'])
    print("get match mean feature")
    agg = df.groupby(['matchId'])[features].agg('mean').reset_index()
    df_out = df_out.merge(agg, suffixes=["", "_match_mean"], how='left', on=['matchId'])
    print("get match size feature")
    agg = df.groupby(['matchId']).size().reset_index(name='match_size')
    df_out = df_out.merge(agg, how='left', on=['matchId'])
    df_out.drop(["matchId", "groupId"], axis=1, inplace=True)
    X = df_out
    del df, df_out, agg, agg_rank
    gc.collect()
    return X, y<prepare_x_and_y>
bins = [ 0, 4, 12, 18, 30, 50, 65, 100] age_index =(1,2,3,4,5,6,7) combdata['Age-bin'] = pd.cut(combdata.Age, bins, labels=age_index ).astype(int) combdata[['Age-bin', 'Survived']].groupby(['Age-bin'],as_index=False ).mean()
x_train, y_train = feature_engineering(True,False) x_test, _ = feature_engineering(False,True )<drop_column>
combdata["Sex"] = combdata["Sex"].map({"male": 0, "female":1})
x_train = reduce_mem_usage(x_train) x_test = reduce_mem_usage(x_test )<load_from_csv>
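`reduce_mem_usage` is called here (and again in later cells) but never defined in this excerpt. A common Kaggle-style sketch that downcasts numeric columns to the smallest safe dtype might look like:

import numpy as np
import pandas as pd

def reduce_mem_usage(df):
    # Hypothetical reconstruction: downcast ints and floats column by column.
    start_mb = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        col_type = df[col].dtype
        if np.issubdtype(col_type, np.integer):
            df[col] = pd.to_numeric(df[col], downcast='integer')
        elif np.issubdtype(col_type, np.floating):
            df[col] = pd.to_numeric(df[col], downcast='float')
    end_mb = df.memory_usage().sum() / 1024**2
    print('Mem. usage decreased from {:.2f} MB to {:.2f} MB'.format(start_mb, end_mb))
    return df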
combdata["Fsize"] = combdata["SibSp"] + combdata["Parch"] + 1 combdata[['Fsize', 'Survived']].groupby(['Fsize'], as_index=False ).mean()
def post_rst(pred_test):
    df_sub = pd.read_csv("../input/sample_submission_V2.csv")
    df_test = pd.read_csv("../input/test_V2.csv")
    df_sub['winPlacePerc'] = pred_test
    df_sub = df_sub.merge(df_test[["Id", "matchId", "groupId", "maxPlace", "numGroups"]], on="Id", how="left")
    df_sub_group = df_sub.groupby(["matchId", "groupId"]).first().reset_index()
    df_sub_group["rank"] = df_sub_group.groupby(["matchId"])["winPlacePerc"].rank()
    df_sub_group = df_sub_group.merge(
        df_sub_group.groupby("matchId")["rank"].max().to_frame("max_rank").reset_index(),
        on="matchId", how="left")
    df_sub_group["adjusted_perc"] = (df_sub_group["rank"] - 1) / (df_sub_group["numGroups"] - 1)
    df_sub = df_sub.merge(df_sub_group[["adjusted_perc", "matchId", "groupId"]], on=["matchId", "groupId"], how="left")
    df_sub["winPlacePerc"] = df_sub["adjusted_perc"]
    df_sub.loc[df_sub.maxPlace == 0, "winPlacePerc"] = 0
    df_sub.loc[df_sub.maxPlace == 1, "winPlacePerc"] = 1
    subset = df_sub.loc[df_sub.maxPlace > 1]
    gap = 1.0 / (subset.maxPlace.values - 1)
    new_perc = np.around(subset.winPlacePerc.values / gap) * gap
    df_sub.loc[df_sub.maxPlace > 1, "winPlacePerc"] = new_perc
    df_sub.loc[(df_sub.maxPlace > 1) & (df_sub.numGroups == 1), "winPlacePerc"] = 0
    assert df_sub["winPlacePerc"].isnull().sum() == 0
    df_sub[["Id", "winPlacePerc"]].to_csv("submission_adjusted.csv", index=False)<choose_model_class>
combdata = combdata.drop(labels='SibSp', axis=1 )
def nn_model(input_shape=x_train.shape[1], hidden_size=64): model = Sequential() model.add(Dense(hidden_size, input_dim=input_shape, kernel_initializer='normal')) model.add(LeakyReLU(0.1)) model.add(Dense(hidden_size, kernel_initializer='normal')) model.add(LeakyReLU(0.1)) model.add(Dense(hidden_size, kernel_initializer='normal')) model.add(LeakyReLU(0.1)) model.add(Dense(hidden_size, kernel_initializer='normal')) model.add(LeakyReLU(0.1)) model.add(Dense(1, kernel_initializer='normal')) model.compile(loss='mean_absolute_error', optimizer='adam', metrics=['mae']) return model<split>
combdata = combdata.drop(labels='Parch', axis=1 )
def train_val_split(x_train, y_train): train_index = round(int(x_train.shape[0]*0.8)) dev_X = x_train[:train_index] val_X = x_train[train_index:] dev_y = y_train[:train_index] val_y = y_train[train_index:] del x_train, y_train gc.collect() ; return dev_X, val_X, dev_y, val_y<split>
combdata.Ticket = combdata.Ticket.map(lambda x: x[0]) combdata[['Ticket', 'Survived']].groupby(['Ticket'], as_index=False ).mean()
dev_X, val_X, dev_y, val_y = train_val_split(x_train, y_train) def run_lgb(train_X, train_y, val_X, val_y, x_test): params = {"objective" : "regression", "metric" : "mae", 'n_estimators':5000, 'early_stopping_rounds':200, "num_leaves" : 150, "learning_rate" : 0.05, "bagging_fraction" : 0.5, "bagging_seed" : 0, "num_threads" : 4,"colsample_bytree" : 0.5 } lgtrain = lgb.Dataset(train_X, label=train_y) lgval = lgb.Dataset(val_X, label=val_y) model = lgb.train(params, lgtrain, valid_sets=[lgtrain, lgval], early_stopping_rounds=200, verbose_eval=1000) pred_test_y = model.predict(x_test, num_iteration=model.best_iteration) return pred_test_y pred_test_lgb = run_lgb(dev_X, dev_y, val_X, val_y, x_test) pred_test_lgb = pred_test_lgb.reshape(-1,1) del dev_X, val_X, dev_y, val_y gc.collect()<train_model>
combdata['Ticket'].value_counts()
np.random.seed(42) std_scaler = StandardScaler().fit(x_train) x_train = std_scaler.transform(x_train) x_test = std_scaler.transform(x_test) mlp_model = nn_model(x_train.shape[1], hidden_size=64) mlp_model.fit(x_train, y_train, batch_size=128, epochs=8, verbose=1, validation_split=0.1, shuffle=True) pred_test_mlp = mlp_model.predict(x_test) del mlp_model gc.collect()<drop_column>
combdata['Ticket'] = combdata['Ticket'].replace(['A','W','F','L','5','6','7','8','9'], '4') combdata[['Ticket', 'Survived']].groupby(['Ticket'], as_index=False ).mean()
del x_train, y_train, x_test gc.collect()<define_variables>
combdata = pd.get_dummies(combdata, columns = ["Ticket"], prefix="T" )
pred_test =(pred_test_mlp + pred_test_lgb)/ 2.0<import_modules>
combdata["Cabin"] = pd.Series([i[0] if not pd.isnull(i)else 'U' for i in combdata['Cabin'] ] )
import numpy as np import pandas as pd import os <set_options>
combdata = combdata.drop(labels='Cabin', axis=1 )
mpl.rcParams['font.sans-serif'] = ['FangSong'] mpl.rcParams['axes.unicode_minus'] = False %matplotlib inline sns.set_style('darkgrid') sns.set_palette('bone') warnings.filterwarnings('ignore') gc.enable() INPUT_DIR = "../input/"<data_type_conversions>
combdata = combdata.drop(labels='Embarked', axis=1 )
def fillInf(df, val): numcols = df.select_dtypes(include='number' ).columns cols = numcols[numcols != 'winPlacePerc'] df[df == np.Inf] = np.NaN df[df == np.NINF] = np.NaN for c in cols: df[c].fillna(val, inplace=True )<load_from_csv>
combdata =combdata.drop(labels=['Age', 'Fare', 'Name'],axis = 1 )
def feature_engineering(is_train=True):
    if is_train:
        print("processing train.csv")
        df = pd.read_csv(INPUT_DIR + 'train_V2.csv')
        df = df[df['maxPlace'] > 1]
    else:
        print("processing test.csv")
        df = pd.read_csv(INPUT_DIR + 'test_V2.csv')
    df.dropna(inplace=True)
    df['totalDistance'] = df['rideDistance'] + df["walkDistance"] + df["swimDistance"]
    match = df.groupby('matchId')
    df['killPlacePerc'] = match['kills'].rank(pct=True).values
    df['walkDistancePerc'] = match['walkDistance'].rank(pct=True).values
    df['_totalDistance'] = df['rideDistance'] + df['walkDistance'] + df['swimDistance']
    df['zombi'] = ((df['_totalDistance'] == 0) | (df['kills'] == 0)
                   | (df['weaponsAcquired'] == 0)
                   | (df['matchType'].str.contains('solo'))).astype(int)
    df['cheater'] = ((df['kills'] / df['_totalDistance'] >= 1)
                     | (df['kills'] > 30) | (df['roadKills'] > 10)).astype(int)
    pd.concat([df['zombi'].value_counts(), df['cheater'].value_counts()], axis=1).T
    df['_healthAndBoosts'] = df['heals'] + df['boosts']
    df['_killDamage'] = df['kills'] * 100 + df['damageDealt']
    df['_killPlaceOverMaxPlace'] = df['killPlace'] / df['maxPlace']
    df['_killsOverWalkDistance'] = df['kills'] / df['walkDistance']
    df['_walkDistancePerSec'] = df['walkDistance'] / df['matchDuration']
    fillInf(df, 0)
    mapper = lambda x: 'solo' if ('solo' in x) else 'duo' if ('duo' in x) or ('crash' in x) else 'squad'
    df['matchType'] = df['matchType'].map(mapper)
    a = pd.get_dummies(df['matchType'], prefix='matchType')
    df = pd.concat([df, a], axis=1)
    df.drop(['headshotKills','teamKills','roadKills','vehicleDestroys'], axis=1, inplace=True)
    df.drop(['rideDistance','swimDistance','matchDuration'], axis=1, inplace=True)
    df.drop(['rankPoints','killPoints','winPoints'], axis=1, inplace=True)
    df.drop(['matchType'], axis=1, inplace=True)
    print("remove some columns")
    target = 'winPlacePerc'
    features = list(df.columns)
    features.remove("Id")
    features.remove("matchId")
    features.remove("groupId")
    y = None
    print("get target")
    if is_train:
        y = np.array(df.groupby(['matchId','groupId'])[target].agg('mean'), dtype=np.float64)
        features.remove(target)
    print("get group mean feature")
    agg = df.groupby(['matchId','groupId'])[features].agg('mean')
    agg_rank = agg.groupby('matchId')[features].rank(pct=True).reset_index()
    if is_train:
        df_out = agg.reset_index()[['matchId','groupId']]
    else:
        df_out = df[['matchId','groupId']]
    df_out = df_out.merge(agg.reset_index(), suffixes=["", ""], how='left', on=['matchId', 'groupId'])
    df_out = df_out.merge(agg_rank, suffixes=["_mean", "_mean_rank"], how='left', on=['matchId', 'groupId'])
    del agg, agg_rank
    gc.collect()
    print("get group max feature")
    agg = df.groupby(['matchId','groupId'])[features].agg('max')
    agg_rank = agg.groupby('matchId')[features].rank(pct=True).reset_index()
    df_out = df_out.merge(agg.reset_index(), suffixes=["", ""], how='left', on=['matchId', 'groupId'])
    df_out = df_out.merge(agg_rank, suffixes=["_max", "_max_rank"], how='left', on=['matchId', 'groupId'])
    del agg, agg_rank
    gc.collect()
    print("get group min feature")
    agg = df.groupby(['matchId','groupId'])[features].agg('min')
    agg_rank = agg.groupby('matchId')[features].rank(pct=True).reset_index()
    df_out = df_out.merge(agg.reset_index(), suffixes=["", ""], how='left', on=['matchId', 'groupId'])
    df_out = df_out.merge(agg_rank, suffixes=["_min", "_min_rank"], how='left', on=['matchId', 'groupId'])
    del agg, agg_rank
    gc.collect()
    print("get group size feature")
    agg = df.groupby(['matchId','groupId']).size().reset_index(name='group_size')
    df_out = df_out.merge(agg, how='left', on=['matchId', 'groupId'])
    print("get match mean feature")
    agg = df.groupby(['matchId'])[features].agg('mean').reset_index()
    df_out = df_out.merge(agg, suffixes=["", "_match_mean"], how='left', on=['matchId'])
    del agg
    gc.collect()
    print("get match size feature")
    agg = df.groupby(['matchId']).size().reset_index(name='match_size')
    df_out = df_out.merge(agg, how='left', on=['matchId'])
    gc.collect()
    df_out.drop(["matchId", "groupId"], axis=1, inplace=True)
    X = np.array(df_out, dtype=np.float64)
    feature_names = list(df_out.columns)
    del df, df_out, agg
    gc.collect()
    return X, y, feature_names<normalization>
from sklearn.svm import SVC from collections import Counter from sklearn.tree import DecisionTreeClassifier from sklearn.neural_network import MLPClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier
x_train, y, feature_names = feature_engineering(True) scaler = MinMaxScaler(feature_range=(-1, 1), copy=False ).fit(x_train) scaler.transform(x_train )<normalization>
train = combdata.loc[combdata['source']=="train"] test = combdata.loc[combdata['source']=="test"] test.drop(labels=["Survived"],axis = 1,inplace=True) train.drop(labels=["source"],axis = 1,inplace=True) test.drop(labels=["source"],axis = 1,inplace=True) test.shape
x_prediction, _, _ = feature_engineering(False) scaler = MinMaxScaler(feature_range=(-1, 1), copy=False ).fit(x_prediction) scaler.transform(x_prediction )<split>
train["Survived"] = train["Survived"].astype(int) Y_train = train["Survived"] X_train = train.drop(labels = ["Survived"],axis = 1) X_train.shape
X_train,X_test, y_train, y_test =train_test_split(x_train,y,test_size=0.3, random_state=0 )<train_model>
kfold = StratifiedKFold(n_splits=10 )
%%time linreg = LinearRegression() linreg.fit(X_train, y_train) print(linreg.intercept_) print(linreg.coef_ )<compute_test_metric>
random_state = 2
classifiers = []
classifiers.append(KNeighborsClassifier())
classifiers.append(LinearDiscriminantAnalysis())
classifiers.append(SVC(random_state=random_state))
classifiers.append(MLPClassifier(random_state=random_state))
classifiers.append(ExtraTreesClassifier(random_state=random_state))
classifiers.append(LogisticRegression(random_state=random_state))
classifiers.append(DecisionTreeClassifier(random_state=random_state))
classifiers.append(RandomForestClassifier(random_state=random_state))
classifiers.append(GradientBoostingClassifier(random_state=random_state))
classifiers.append(AdaBoostClassifier(DecisionTreeClassifier(random_state=random_state), random_state=random_state, learning_rate=0.1))
cv_results = []
for classifier in classifiers:
    cv_results.append(cross_val_score(classifier, X_train, y=Y_train, scoring="accuracy", cv=kfold, n_jobs=4))
cv_means = []
cv_std = []
for cv_result in cv_results:
    cv_means.append(cv_result.mean())
    cv_std.append(cv_result.std())
# Algorithm labels listed in the same order the classifiers were appended,
# so each bar is labelled with the model that produced it.
cv_res = pd.DataFrame({"CrossValMeans": cv_means, "CrossValerrors": cv_std,
                       "Algorithm": ["KNeighbors", "LinearDiscriminantAnalysis", "SVC",
                                     "MultiLayerPerceptron", "ExtraTrees", "LogisticRegression",
                                     "DecisionTree", "RandomForest", "GradientBoosting", "AdaBoost"]})
g = sns.barplot("CrossValMeans", "Algorithm", data=cv_res, palette="Set3", orient="h", **{'xerr': cv_std})
g.set_xlabel("Mean Accuracy")
g = g.set_title("Cross validation scores")
<compute_test_metric>
DTC = DecisionTreeClassifier() adaDTC = AdaBoostClassifier(DTC, random_state=7) ada_param_grid = {"base_estimator__criterion" : ["gini", "entropy"], "base_estimator__splitter" : ["best", "random"], "algorithm" : ["SAMME","SAMME.R"], "n_estimators" :[1,2], "learning_rate": [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3,1.5]} gsadaDTC = GridSearchCV(adaDTC,param_grid = ada_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1) gsadaDTC.fit(X_train,Y_train) ada_best = gsadaDTC.best_estimator_ gsadaDTC.best_score_
<predict_on_test>
ExtC = ExtraTreesClassifier() ex_param_grid = {"max_depth": [None], "max_features": [1, 3, 10], "min_samples_split": [2, 3, 10], "min_samples_leaf": [1, 3, 10], "bootstrap": [False], "n_estimators" :[100,300], "criterion": ["gini"]} gsExtC = GridSearchCV(ExtC,param_grid = ex_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1) gsExtC.fit(X_train,Y_train) ExtC_best = gsExtC.best_estimator_ gsExtC.best_score_
%%time result = linreg.predict(x_prediction )<load_from_csv>
RFC = RandomForestClassifier() rf_param_grid = {"max_depth": [None], "max_features": [1, 3, 10], "min_samples_split": [2, 3, 10], "min_samples_leaf": [1, 3, 10], "bootstrap": [False], "n_estimators" :[100,300], "criterion": ["gini"]} gsRFC = GridSearchCV(RFC,param_grid = rf_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1) gsRFC.fit(X_train,Y_train) RFC_best = gsRFC.best_estimator_ gsRFC.best_score_
%%time test_data = pd.read_csv(INPUT_DIR+'test_V2.csv') print("fix winPlacePerc") for i in range(len(test_data)) : winPlacePerc = result[i] maxPlace = int(test_data.iloc[i]['maxPlace']) if maxPlace == 0: winPlacePerc = 0.0 elif maxPlace == 1: winPlacePerc = 1.0 else: gap = 1.0 /(maxPlace - 1) winPlacePerc = round(winPlacePerc / gap)* gap if winPlacePerc < 0: winPlacePerc = 0.0 if winPlacePerc > 1: winPlacePerc = 1.0 result[i] = winPlacePerc<save_to_csv>
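The per-row loop above snaps each prediction to the discrete grid of valid placements, 1/(maxPlace - 1), and clips to [0, 1]. A hypothetical vectorized equivalent (assuming `result` and `test_data` as defined above):

import numpy as np
maxPlace = test_data['maxPlace'].to_numpy()
gap = 1.0 / np.maximum(maxPlace - 1, 1)   # guard against divide-by-zero
snapped = np.clip(np.around(result / gap) * gap, 0.0, 1.0)
snapped[maxPlace == 0] = 0.0              # degenerate matches, as in the loop
snapped[maxPlace == 1] = 1.0
result = snapped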
GBC = GradientBoostingClassifier() gb_param_grid = {'loss' : ["deviance"], 'n_estimators' : [100,200,300], 'learning_rate': [0.1, 0.05, 0.01], 'max_depth': [4, 8], 'min_samples_leaf': [100,150], 'max_features': [0.3, 0.1] } gsGBC = GridSearchCV(GBC,param_grid = gb_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1) gsGBC.fit(X_train,Y_train) GBC_best = gsGBC.best_estimator_ gsGBC.best_score_
f3=open(INPUT_DIR+'sample_submission_V2.csv') submit=pd.read_csv(f3) sample_result = pd.DataFrame(result,columns = ['winPlacePerc']) submit['winPlacePerc'] = sample_result submit.to_csv(r'sample_submission_lineregression.csv', index=False) del f3,result,submit gc.collect()<train_model>
SVMC = SVC(probability=True) svc_param_grid = {'kernel': ['rbf'], 'gamma': [ 0.001, 0.01, 0.1, 1], 'C': [1, 10, 50, 100,200,300, 1000]} gsSVMC = GridSearchCV(SVMC,param_grid = svc_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1) gsSVMC.fit(X_train,Y_train) SVMC_best = gsSVMC.best_estimator_ gsSVMC.best_score_
%%time model_lasso = Lasso(alpha=0.001) model_lasso.fit(X_train, y_train) print(model_lasso.intercept_) print(model_lasso.coef_ )<predict_on_test>
votingC = VotingClassifier(estimators=[('rfc', RFC_best),('extc', ExtC_best), ('svc', SVMC_best),('adac',ada_best),('gbc',GBC_best)], voting='soft', n_jobs=4) votingC = votingC.fit(X_train, Y_train )
%%time predicted_lasso = model_lasso.predict(x_prediction )<load_from_csv>
test_Survived = pd.Series(votingC.predict(test), name="Survived") results = pd.concat([IDtest,test_Survived],axis=1) results.to_csv("Final Submission File.csv",index=False )
[kernel_id 10,345,958 — comp_name: Titanic - Machine Learning from Disaster]
%%time test_data = pd.read_csv(INPUT_DIR+'test_V2.csv') print("fix winPlacePerc") for i in range(len(test_data)) : winPlacePerc = predicted_lasso[i] maxPlace = int(test_data.iloc[i]['maxPlace']) if maxPlace == 0: winPlacePerc = 0.0 elif maxPlace == 1: winPlacePerc = 1.0 else: gap = 1.0 /(maxPlace - 1) winPlacePerc = round(winPlacePerc / gap)* gap if winPlacePerc < 0: winPlacePerc = 0.0 if winPlacePerc > 1: winPlacePerc = 1.0 predicted_lasso[i] = winPlacePerc<save_to_csv>
import pandas as pd import numpy as np import seaborn as sns from sklearn.preprocessing import LabelEncoder from sklearn.impute import SimpleImputer from sklearn.preprocessing import OneHotEncoder from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.model_selection import cross_val_score from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier
f4=open(INPUT_DIR+'sample_submission_V2.csv') submit_lasso=pd.read_csv(f4) sample_result_lasso = pd.DataFrame(predicted_lasso,columns = ['winPlacePerc']) submit_lasso['winPlacePerc'] = sample_result_lasso submit_lasso.to_csv(r'sample_submission_lasso.csv', index=False) del f4,submit_lasso,sample_result_lasso gc.collect()<train_model>
from sklearn.model_selection import GridSearchCV
%%time model_ridge = Ridge(alpha=0.5994842503189409) model_ridge.fit(X_train, y_train) print(model_ridge.intercept_) print(model_ridge.coef_ )<predict_on_test>
from sklearn.model_selection import GridSearchCV
%%time predicted_ridge = model_ridge.predict(x_prediction )<load_from_csv>
train_df = pd.read_csv('/kaggle/input/titanic/train.csv') test_df = pd.read_csv('/kaggle/input/titanic/test.csv') print(train_df.shape) print(test_df.shape) train_df.head()
%%time test_data = pd.read_csv(INPUT_DIR+'test_V2.csv') print("fix winPlacePerc") for i in range(len(test_data)) : winPlacePerc = predicted_ridge[i] maxPlace = int(test_data.iloc[i]['maxPlace']) if maxPlace == 0: winPlacePerc = 0.0 elif maxPlace == 1: winPlacePerc = 1.0 else: gap = 1.0 /(maxPlace - 1) winPlacePerc = round(winPlacePerc / gap)* gap if winPlacePerc < 0: winPlacePerc = 0.0 if winPlacePerc > 1: winPlacePerc = 1.0 predicted_ridge[i] = winPlacePerc<save_to_csv>
train_df[['Pclass','Survived']].groupby('Pclass' ).mean().sort_values(by = 'Survived', ascending = False )
f5=open(INPUT_DIR+'sample_submission_V2.csv') submit_ridge=pd.read_csv(f5) sample_result_ridge = pd.DataFrame(predicted_ridge,columns = ['winPlacePerc']) submit_ridge['winPlacePerc'] = sample_result_ridge submit_ridge.to_csv(r'sample_submission_ridge.csv', index=False) del f5,submit_ridge,sample_result_ridge gc.collect()<train_model>
train_df[['Sex','Survived']].groupby('Sex' ).mean().sort_values(by = 'Survived', ascending = False )
%%time model_elasticnet = ElasticNet(alpha=1.6152516038498196e-06, copy_X=True, fit_intercept=True, l1_ratio=0.5, max_iter=1000, normalize=False, positive=False, precompute=False, random_state=0, selection='cyclic', tol=0.0001, warm_start=False) model_elasticnet.fit(X_train, y_train) print(model_elasticnet.intercept_) print(model_elasticnet.coef_ )<predict_on_test>
train_df[['SibSp','Survived']].groupby('SibSp' ).mean().sort_values(by = 'Survived', ascending = False )
%%time predicted_elasticNet = model_elasticnet.predict(x_prediction )<load_from_csv>
train_df[['Parch','Survived']].groupby('Parch' ).mean().sort_values(by = 'Survived', ascending = False )
%%time test_data = pd.read_csv(INPUT_DIR+'test_V2.csv') print("fix winPlacePerc") for i in range(len(test_data)) : winPlacePerc = predicted_elasticNet[i] maxPlace = int(test_data.iloc[i]['maxPlace']) if maxPlace == 0: winPlacePerc = 0.0 elif maxPlace == 1: winPlacePerc = 1.0 else: gap = 1.0 /(maxPlace - 1) winPlacePerc = round(winPlacePerc / gap)* gap if winPlacePerc < 0: winPlacePerc = 0.0 if winPlacePerc > 1: winPlacePerc = 1.0 predicted_elasticNet[i] = winPlacePerc<save_to_csv>
labelencoder = LabelEncoder() train_df['Sex'] = labelencoder.fit_transform(train_df['Sex']) test_df['Sex'] = labelencoder.fit_transform(test_df['Sex']) test_df.head()
f6=open(INPUT_DIR+'sample_submission_V2.csv') submit_elasticNet=pd.read_csv(f6) sample_result_elasticNet = pd.DataFrame(predicted_elasticNet,columns = ['winPlacePerc']) submit_elasticNet['winPlacePerc'] = sample_result_elasticNet submit_elasticNet.to_csv(r'sample_submission_elasticNet.csv', index=False) del f6,submit_elasticNet,sample_result_elasticNet gc.collect()<import_modules>
train_df['Family size'] = train_df['SibSp'] + train_df['Parch'] + 1 test_df['Family size'] = test_df['SibSp'] + test_df['Parch'] + 1 train_df[['Family size','Survived']].groupby('Family size' ).mean().sort_values(by = 'Survived', ascending = False )
import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.linear_model import Lasso from catboost import CatBoostRegressor from sklearn.metrics import mean_absolute_error from sklearn.preprocessing import StandardScaler,MinMaxScaler<load_from_csv>
train_df['Fam_type'] = pd.cut(train_df['Family size'], [0,1,4,7,11], labels=['Solo', 'Small', 'Big', 'Very big']) test_df['Fam_type'] = pd.cut(test_df['Family size'], [0,1,4,7,11], labels=['Solo', 'Small', 'Big', 'Very big'] )
train=pd.read_csv('../input/train_V2.csv') test=pd.read_csv('../input/test_V2.csv') ID=test['Id']<count_missing_values>
combine = [train_df, test_df] for dataset in combine: dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\.', expand=False) pd.crosstab(train_df['Title'], train_df['Sex'] )
train.isna().sum()<correct_missing_values>
for dataset in combine: dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') dataset['Title'] = dataset['Title'].replace('Ms', 'Miss') dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Don', 'Sir', 'Jonkheer', 'Dona'],'Royalty') dataset['Title'] = dataset['Title'].replace(['Capt', 'Col','Dr','Major','Rev'],'Special') train_df[['Title','Survived']].groupby(['Title'], as_index=False ).mean()
train=train.dropna(axis=0 )<prepare_x_and_y>
y = train_df['Survived'] features = ['Pclass','Sex','Fam_type','Fare','Age Bin','Embarked'] X = train_df[features] X.head()
y_train=train['winPlacePerc'] train=train.drop(['winPlacePerc'],axis=1 )<categorify>
numerical_col = ['Fare'] categorical_col = ['Pclass','Sex','Fam_type','Age Bin','Embarked'] num_trans = SimpleImputer(strategy = 'median') cat_trans = Pipeline(steps = [ ('imputer',SimpleImputer(strategy = 'most_frequent')) , ('onehot',OneHotEncoder()) ]) preprocessor = ColumnTransformer( transformers = [ ('num',num_trans,numerical_col), ('cat',cat_trans,categorical_col) ]) titanic_pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', RandomForestClassifier(random_state=0 )) ]) param_grid = { 'model__max_depth': [2, 3, 4, 5], 'model__min_samples_leaf': [3, 4, 5], 'model__min_samples_split': [6, 8, 10, 12], 'model__n_estimators': [100, 200, 300, 500] } search = GridSearchCV(titanic_pipeline, param_grid, n_jobs=-1) search.fit(X, y) print(search.best_params_)
train["playersInMatch"] = train.groupby("matchId")["Id"].transform("count") train["playersInGroup"] = train.groupby("groupId")["Id"].transform("count") test["playersInMatch"] = test.groupby("matchId")["Id"].transform("count") test["playersInGroup"] = test.groupby("groupId")["Id"].transform("count" )<categorify>
numerical_col = ['Fare'] categorical_col = ['Pclass','Sex','Fam_type','Age Bin','Embarked'] num_trans = SimpleImputer(strategy = 'median') cat_trans = Pipeline(steps = [ ('imputer',SimpleImputer(strategy = 'most_frequent')) , ('onehot',OneHotEncoder()) ]) preprocessor = ColumnTransformer( transformers = [ ('num',num_trans,numerical_col), ('cat',cat_trans,categorical_col) ]) titanic_pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', RandomForestClassifier(random_state=0, max_depth = 5, n_estimators = 500 )) ]) titanic_pipeline.fit(X,y) print('Cross validation score: {:.3f}'.format(cross_val_score(titanic_pipeline, X, y, cv=10 ).mean()))
<categorify>
predictions = titanic_pipeline.predict(X_test) output = pd.DataFrame({'PassengerId': test_df.PassengerId, 'Survived': predictions}) output.to_csv('my_submission2.csv', index=False) print("Your submission was successfully saved!" )
[kernel_id 12,392,871 — comp_name: Titanic - Machine Learning from Disaster]
metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<categorify>
import pandas as pd import numpy as np from sklearn.model_selection import cross_validate from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel, RationalQuadratic, DotProduct,Matern from sklearn.gaussian_process import GaussianProcessClassifier
train['LastMan'] = train.groupby('groupId')['matchDuration'].transform('max') test['LastMan'] = test.groupby('groupId')['matchDuration'].transform('max' )<feature_engineering>
train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
for test_set, df in enumerate([train_df, test_df]):
    df['Title'] = df.Name.str.extract('([A-Za-z]+)\.', expand=False)
    df['Title'] = df['Title'].replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    df["Title"] = df["Title"].map({"Master":0, "Miss":1, "Ms":1, "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3}).fillna(3).astype(int)
    df['Sex'] = df['Sex'].map({'female': 1, 'male': 0}).fillna(2).astype(int)
    df['Embarked'] = df['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).fillna(3).astype(int)
    df["Cabin"] = pd.Series([i[0] if not pd.isnull(i) else 'X' for i in df['Cabin']])
    df['Cabin'] = df['Cabin'].map({'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4, 'F': 5, 'G': 6, 'T': 7, 'X': 8}).fillna(9).astype(int)
    df = df.drop(['Name', 'PassengerId', 'Ticket'], axis=1)
    index_NaN = list(df["Fare"][df["Fare"].isnull()].index)
    for i in index_NaN:
        df.loc[i, 'Fare'] = df.Fare[df['Pclass'] == df.loc[i, "Pclass"]].median()
    index_NaN = list(df["Age"][df["Age"].isnull()].index)
    for i in index_NaN:
        idx = ((df['SibSp'] == df.iloc[i]["SibSp"])
               & (df['Parch'] == df.iloc[i]["Parch"])
               & (df['Pclass'] == df.iloc[i]["Pclass"]))
        age_pred = df.Age[idx].median()
        if np.isnan(age_pred):
            df.loc[i, 'Age'] = df["Age"].median()
        else:
            df.loc[i, 'Age'] = age_pred
    if test_set == True:
        X_test = df
    else:
        X_train = df.drop(['Survived'], axis=1)
        y_train = df["Survived"]
print(X_train.head(5))
print(X_test.head(5))
print(y_train.head(5))
dx = X_train.shape[1]
train['Survival'] = train['LastMan'] - train['FirstMan'] test['Survival'] = test['LastMan'] - test['FirstMan']<feature_engineering>
ls = np.ones(( dx,)) kernels = [ConstantKernel() * RBF(ls)+ WhiteKernel() , ConstantKernel() * Matern(ls)+ WhiteKernel() , ] best_score = 0 for kernel in kernels: classifier = GaussianProcessClassifier(kernel=kernel,n_restarts_optimizer=5) classifier.fit(X_train, y_train) result = cross_validate(classifier,X_train, y_train,cv=3) score = result['test_score'].mean() print('Average result: ' + str(score)+ ', Training time: ' + str(result['fit_time'].mean())) if score > best_score: best_score = score best_classifier = classifier
train['Position'] = train['killPlace'] /(train['maxPlace'] + 1e-9) test['Position'] = test['killPlace'] /(test['maxPlace'] + 1e-9 )<drop_column>
predictions = best_classifier.predict(X_test ).astype(int) output = pd.DataFrame({'PassengerId': test_df.PassengerId, 'Survived': predictions}) print(best_classifier.kernel) print(output) output.to_csv('submission.csv', index=False )
[kernel_id 10,757,188 — comp_name: Titanic - Machine Learning from Disaster]
train.drop(["matchId","groupId",'Id','killPoints', 'maxPlace', 'winPoints','vehicleDestroys'],axis=1,inplace=True) test.drop(["matchId","groupId",'Id','killPoints', 'maxPlace', 'winPoints','vehicleDestroys'],axis=1,inplace=True )<feature_engineering>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.head(5 )
train['headshotrate'] = train['kills'] /(train['headshotKills'] + 1e-9) test['headshotrate'] = test['kills'] /(test['headshotKills'] + 1e-9) train['killStreakrate'] = train['killStreaks'] /(train['kills'] + 1e-9) test['killStreakrate'] = test['killStreaks'] /(test['kills'] + 1e-9 )<feature_engineering>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.head(5 )
train['TotalDamage'] = train['damageDealt'] + train['teamKills']*100 test['TotalDamage'] = test['damageDealt'] + test['teamKills']*100<feature_engineering>
data_dict = {} data_dict['train data'] = train_data data_dict['test data'] = test_data;
train['Noob']=(train['matchDuration'] < train['matchDuration'].mean()) test['Noob']=(test['matchDuration'] < train['matchDuration'].mean() )<feature_engineering>
compare_features(train_data, 'Pclass','Age' )
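`compare_features` is called in this and the next few cells but never defined in this excerpt; a minimal sketch consistent with its call sites (group one column, average another, rank the result) could be:

def compare_features(df, feature, target):
    # Hypothetical helper: mean of `target` per value of `feature`, best first.
    return (df[[feature, target]]
            .groupby(feature, as_index=False)
            .mean()
            .sort_values(by=target, ascending=False))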
train['Sniper']=(train['longestKill']>=250) test['Sniper']=(test['longestKill']>=250 )<feature_engineering>
compare_features(train_data, 'SibSp', 'Survived' )
train['ProAim']=(train['headshotKills']/(train['kills']+1e-9)) test['ProAim']=(test['headshotKills']/(test['kills']+1e-9))<feature_engineering>
compare_features(train_data, 'Parch', 'Survived' )
train['distance'] =(train['rideDistance']+train['swimDistance']+train['walkDistance']) test['distance'] =(test['rideDistance']+test['swimDistance']+test['walkDistance']) train['distance'] = np.log1p(train['distance']) test['distance'] = np.log1p(test['distance'] )<define_variables>
compare_features(train_data, 'Embarked', 'Survived' )
set1=set(i for i in train[(train['kills']>40)&(train['heals']==0)].index.tolist()) set2=set(i for i in train[(train['distance']==0)&(train['kills']>20)].index.tolist()) set3=set(i for i in train[(train['damageDealt']>4000)&(train['heals']<2)].index.tolist()) set4=set(i for i in train[(train['rideDistance']>25000)].index.tolist()) set5=set(i for i in train[(train['killStreaks']>3)&(train['weaponsAcquired']> 30)].index.tolist()) sets=set1 | set2 | set3 | set4 | set5<prepare_x_and_y>
train_data.groupby(['Embarked'] ).mean().drop(['PassengerId', 'SibSp', 'Parch'], axis=1 )
train=train.drop(list(sets)) y_train=y_train.drop(list(sets)) <feature_engineering>
def concat_data(data_1, data_2): return pd.concat([data_1, data_2], sort=False ).reset_index(drop=True) def divide_data(all_data): return all_data.loc[:890], all_data.loc[891:].drop(['Survived'], axis=1) data_all = concat_data(train_data, test_data) data_fs = [train_data, test_data]
fpp=['crashfpp','duo-fpp','flare-fpp','normal-duo-fpp','normal-solo-fpp','normal-squad-fpp','solo-fpp','squad-fpp'] train["fpp"] = np.where(train["matchType"].isin(fpp),1,0) test["fpp"] = np.where(test["matchType"].isin(fpp),1,0 )<define_variables>
miss_data_dict = {} for key, dataset in data_dict.items() : miss_abs = dataset.isnull().sum() miss_rel = miss_abs / dataset.isnull().count() col_abs = '{}: missing values(absolut)'.format(key) col_rel = '{}: missing values(relative in %)'.format(key) if key == 'test data': miss_data_dict[key] = pd.concat([miss_abs.sort_values(ascending=False), miss_rel.sort_values(ascending=False)*100], axis=1, keys=[col_abs, col_rel]) elif key == 'train data': miss_data_dict[key] = pd.concat([miss_abs.sort_values(ascending=False), miss_rel.sort_values(ascending=False)*100], axis=1, keys=[col_abs, col_rel])
change={'crashfpp':'crash', 'crashtpp':'crash', 'duo':'duo', 'duo-fpp':'duo', 'flarefpp':'flare', 'flaretpp':'flare', 'normal-duo':'duo', 'normal-duo-fpp':'duo', 'normal-solo':'solo', 'normal-solo-fpp':'solo', 'normal-squad':'squad', 'normal-squad-fpp':'squad', 'solo-fpp':'solo', 'squad-fpp':'squad', 'solo':'solo', 'squad':'squad' } train['matchType']=train['matchType'].map(change) test['matchType']=test['matchType'].map(change )<categorify>
msno.matrix(test_data,figsize=(9,2),width_ratios=(10,1)) miss_data_dict['test data'].head()
modes={'crash':1, 'duo':2, 'flare':3, 'solo':4, 'squad':5 } train['matchType']=train['matchType'].map(modes) test['matchType']=test['matchType'].map(modes )<categorify>
data_all.groupby(['Pclass', 'Sex'])['Age'].mean()
d1=pd.get_dummies(train['matchType']) train=train.drop(['matchType'],axis=1) train=train.join(d1) d2=pd.get_dummies(test['matchType']) test=test.drop(['matchType'],axis=1) test=test.join(d2) <normalization>
index = data_all['Age'].index[data_all['Age'].apply(np.isnan)] data_all.loc[index, ['Pclass', 'Sex', 'Age']]
scaler = MinMaxScaler() scaler.fit(train) train=scaler.transform(train) test=scaler.transform(test )<create_dataframe>
data_all['Age'] = data_all.groupby(['Pclass', 'Sex'])['Age'].apply(lambda x: x.fillna(x.mean())) train_data, test_data = divide_data(data_all) data_dict['train data'] = train_data data_dict['test data'] = test_data data_all.loc[index, ['Pclass', 'Sex', 'Age']]
df = pd.DataFrame(train) df.isnull().sum()<split>
data_all.Cabin = data_all.Cabin.fillna('Unknown') data_all['Deck'] = data_all['Cabin'].str[0]
X_train,X_test,y_train,y_test= train_test_split(train,y_train,test_size=0.3 )<train_model>
data_all.groupby(['Pclass'] ).Deck.value_counts()
lm = Lasso(alpha=1e-5) lm.fit(X_train,y_train )<compute_test_metric>
data_all.groupby(['Deck'] ).mean().drop(['PassengerId', 'SibSp', 'Parch'], axis=1 )
train_mae = mean_absolute_error(y_train, lm.predict(X_train)) test_mae = mean_absolute_error(y_test, lm.predict(X_test)) train_mae, test_mae<predict_on_test>
deck_avg_fare = data_all.groupby(['Deck'] ).Fare.mean().drop('U') deck_avg_fare
y_train = y_train - lm.predict(X_train) y_test = y_test - lm.predict(X_test )<define_variables>
indices_U = data_all[data_all['Deck'] == 'U'].index for i in indices_U: fare = data_all.iloc[i].Fare nearest_avg_fare = min(deck_avg_fare, key=lambda x: abs(x-fare)) deck = deck_avg_fare[deck_avg_fare == nearest_avg_fare].index[0] data_all.loc[i, 'Deck'] = deck train_data, test_data = divide_data(data_all) data_dict['train data'] = train_data data_dict['test data'] = test_data
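A hypothetical vectorized alternative to the loop above, assuming `deck_avg_fare` is the per-deck mean-fare Series built two cells earlier:

import numpy as np
fares = data_all.loc[indices_U, 'Fare'].to_numpy()
# broadcasted |fare - deck average| matrix: one row per unknown-deck passenger
diffs = np.abs(fares[:, None] - deck_avg_fare.to_numpy()[None, :])
data_all.loc[indices_U, 'Deck'] = deck_avg_fare.index.to_numpy()[diffs.argmin(axis=1)]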
train_pool = Pool(X_train, y_train) test_pool = Pool(X_test, y_test )<choose_model_class>
data_all.loc[indices_U, ['Fare', 'Deck']]
model = CatBoostRegressor( iterations=5000, depth=10, learning_rate=0.1, l2_leaf_reg= 2, loss_function='RMSE', eval_metric='MAE', random_strength=0.1, bootstrap_type='Bernoulli', leaf_estimation_method='Gradient', leaf_estimation_iterations=1, boosting_type='Plain' ,task_type = "GPU" ,feature_border_type='GreedyLogSum' ,random_seed=1234 )<train_model>
def handle_missing_data(data, miss_abs): for col in data: miss_prop = data[col].isna().sum() /len(data) if miss_prop < 0.5: if data[col].dtype == "float64" and data[col].isnull().sum() > 0: mean = data[col].mean() data[col] = data[col].fillna(mean) print("Filling {} missing values in {} with mean value {:.0f}.".format(miss_abs[col], col, mean)) elif data[col].dtype == "object" and data[col].isnull().sum() > 0: top = data[col].describe().top data[col] = data[col].fillna(top) print("Filling {} missing values in {} with top value {}.".format(miss_abs[col], col, top)) return data
model.fit(train_pool, eval_set=test_pool )<compute_test_metric>
for key, dataset in data_dict.items() : print('{} operations:'.format(key)) miss_abs = miss_data_dict[key]['{}: missing values(absolut)'.format(key)] miss_rel = miss_data_dict[key]['{}: missing values(relative in %)'.format(key)] data_dict[key] = handle_missing_data(dataset, miss_abs )
train_mae = mean_absolute_error(y_train, lm.predict(X_train) + model.predict(X_train)) test_mae = mean_absolute_error(y_test, lm.predict(X_test) + model.predict(X_test)) print('Train MAE = ', train_mae) print('Test MAE = ', test_mae) <save_to_csv>
def title_extract(data): data['Title'] = data['Name'].str.split(',', expand=True)[1].str.split('.', expand=True)[0] return data
subm = pd.read_csv('../input/sample_submission_V2.csv') predictions = model.predict(test) + lm.predict(test) test = pd.read_csv('../input/test_V2.csv') test['winPlacePerc'] = predictions test['winPlacePerc'] = test.groupby('groupId')['winPlacePerc'].transform('median') subm['winPlacePerc'] = test['winPlacePerc'] subm['Id']=ID subm.to_csv('submission.csv', index = False) <define_variables>
for key, dataset in data_dict.items() : title = title_extract(dataset) data_all = concat_data(train_data, test_data) data_all['Title'].value_counts()
kernel_start_time = datetime.datetime.now() train_path = '../input/train_V2.csv' test_path = '../input/test_V2.csv' print(os.listdir("../input")) <data_type_conversions>
title_names =(data_all['Title'].value_counts() < 10) for key, dataset in data_dict.items() : dataset['Title'] = dataset['Title'].apply(lambda x: 'Misc' if title_names.loc[x] == True else x) data_all = concat_data(train_data, test_data) print(data_all['Title'].value_counts() )
data_types = {'Id':str,'groupId':str,'matchId':str,'assists':np.int8,'boosts':np.int8,'damageDealt':np.float16,'DBNOs':np.int8, 'headshotKills':np.int8,'heals':np.int8,'killPlace':np.int8,'killPoints':np.int16,'kills':np.int8,'killStreaks':np.int8,'longestKill':np.float16, 'matchDuration':np.int16,'matchType':str,'maxPlace':np.int8,'numGroups':np.int8,'rankPoints':np.int16,'revives':np.int8,'rideDistance':np.float16, 'roadKills':np.int8,'swimDistance':np.float16,'teamKills':np.int8,'vehicleDestroys':np.int8,'walkDistance':np.float16,'weaponsAcquired':np.int16, 'winPoints':np.int16,'winPlacePerc':np.float16} data_types['walkDistance'] = np.float32 data_types['rideDistance'] = np.float32 data_types['damageDealt'] = np.float32<define_variables>
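Design note: the same dtype map can be applied at load time rather than casting column-by-column afterwards; a sketch, assuming `train_path` (defined above) points at a CSV containing exactly these columns:

import pandas as pd
# read with the narrow dtypes up front instead of astype() after the fact
train = pd.read_csv(train_path, dtype=data_types)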
for key, dataset in data_dict.items() : dataset['Age'] = dataset['Age'].astype('int', copy=True) age_labels = [1,2,3,4,5] dataset['Age interval'] = pd.cut(data_all['Age'].astype('int'),5) dataset['Age Code'] = pd.cut(data_all['Age'],5, labels=age_labels ).astype('int64') data_all = concat_data(train_data, test_data) print('Number of cases within age intervals in datast:') data_all['Age interval'].value_counts()
zero_stdev_cols = ['median_match_roadKills', 'median_match_vehicleDestroys', 'median_match_road_kills_per_rideDistance', 'std_match_longestKill'] ulesess_stat_cols = ['median_match_revives','median_match_teamKills','median_match_swimDistance_norm', 'max_match_kill_streak_rate','max_group_roadKills','min_group_roadKills','median_match_assists', 'sum_group_roadKills','sum_group_vehicleDestroys','min_group_vehicleDestroys'] useless_match_type_names = ['normal-duo','flarefpp','normal-solo','crashtpp','normal-squad','flaretpp','normal-duo-fpp', 'normal-squadfpp', 'normal-squad-fpp', 'crashfpp','duo','squad-fpp','normal-solo-fpp'] all_useless_cols = zero_stdev_cols + ulesess_stat_cols + useless_match_type_names all_useless_cols = set(all_useless_cols) min_match_useful_cols = ['min_match_rankPoints', 'min_match_winPoints', 'min_match_killPoints']<feature_engineering>
for key, dataset in data_dict.items() : fare_labels = [1,2,3,4,5] dataset['Fare interval'] = pd.qcut(data_all['Fare'], 5) dataset['Fare Code'] = pd.qcut(data_all['Fare'], 5, labels=fare_labels ).astype('int64') data_all = concat_data(train_data, test_data) print('Number of cases per fare category in dataset(approx.equal):') data_all['Fare interval'].value_counts()
def fix_missing_ranks(X, mean_ranks=None, rank_stds=None, rank_cols=['rankPoints', 'winPoints']): if(mean_ranks is None)or(rank_stds is None): mean_ranks = {} rank_stds = {} for rank_col in rank_cols: mean_ranks[rank_col] = X.loc[X[rank_col] > 1, rank_col].mean() rank_stds[rank_col] = X.loc[X[rank_col] > 1, rank_col].std() rank_deltas =(X.loc[(X['rankPoints'] > 1)&(X['winPoints'] <= 1), 'rankPoints'] - mean_ranks['rankPoints'])/ rank_stds['rankPoints'] X.loc[(X['rankPoints'] > 1)&(X['winPoints'] <= 1), 'winPoints'] = mean_ranks['winPoints'] + rank_stds['winPoints']*rank_deltas win_deltas =(X.loc[(X['rankPoints'] <= 1)&(X['winPoints'] > 1), 'winPoints'] - mean_ranks['winPoints'])/ rank_stds['winPoints'] X.loc[(X['rankPoints'] <= 1)&(X['winPoints'] > 1), 'rankPoints'] = mean_ranks['rankPoints'] + rank_stds['rankPoints']*win_deltas X.loc[(X['rankPoints'] <= 1)&(X['winPoints'] <= 1), 'rankPoints'] = mean_ranks['rankPoints'] X.loc[(X['rankPoints'] <= 1)&(X['winPoints'] <= 1), 'winPoints'] = mean_ranks['winPoints'] return X, mean_ranks, rank_stds<feature_engineering>
for key, dataset in data_dict.items() : dataset['Family Size'] = dataset ['SibSp'] + dataset['Parch'] + 1 data_all = concat_data(train_data, test_data )
def add_player_features(X): X['headshot_rate'] = X['headshotKills'] /(X['kills'] + 0.00001) X['kill_streak_rate'] = X['killStreaks'] /(X['kills'] + 0.00001) X['kills_assists'] = X['kills'] + X['assists'] X['heals_boosts'] = X['heals'] + X['boosts'] X['total_distance'] = X['walkDistance'] + X['rideDistance'] + X['swimDistance'] X['kills_assists_per_heal_boost'] = X['kills_assists'] /(X['heals_boosts'] + 1) X['damageDealt_per_heal_boost'] = X['damageDealt'] /(X['heals_boosts'] + 1) X['road_kills_per_rideDistance'] = X['roadKills'] /(X['rideDistance'] + 0.01) X['maxPlace_per_numGroups'] = X['maxPlace'] / X['numGroups'] X['assists_per_kill'] = X['assists'] /(X['kills'] + X['assists'] + 0.0001) X['killPlace'] = X['killPlace'] - 1 return X<groupby>
family_mapping =(lambda s: 1 if s == 1 else(2 if s == 2 else(3 if 3 <= s <= 4 else(4 if s >= 5 else 0)))) for key, dataset in data_dict.items() : dataset['Family Size Code'] = dataset['Family Size'].map(family_mapping) data_all = concat_data(train_data, test_data) data_all['Family Size Code'].value_counts()
def create_basic_group_info(X): group_cols = ['matchId', 'groupId', 'matchDuration', 'matchType', 'maxPlace', 'numGroups', 'maxPlace_per_numGroups', 'winPlacePerc', 'killPlace'] if 'winPlacePerc' not in X.columns: group_cols.remove('winPlacePerc') pl_data_grouped = X[group_cols].groupby(['matchId', 'groupId']) gr_data = pl_data_grouped.first() gr_data.drop(columns='killPlace', inplace=True) gr_data['raw_groupSize'] = pl_data_grouped['numGroups'].count() gr_data['groupSize'] = gr_data['raw_groupSize'] gr_data['group_size_overflow'] =(gr_data['groupSize'] > 4 ).astype(np.int8) gr_data.loc[gr_data['groupSize'] > 4, ['groupSize']] = 2 gr_data['meanGroupSize'] = gr_data.groupby('matchId')['groupSize'].transform(np.mean) gr_data['medianGroupSize'] = gr_data.groupby('matchId')['groupSize'].transform(np.median) gr_data['maxKillPlace'] = pl_data_grouped['killPlace'].max().groupby('matchId' ).transform(np.max) gr_data['totalPlayers'] = gr_data.groupby('matchId')['groupSize'].transform(sum) gr_data['totalPlayersAdjusted'] = gr_data['maxPlace'].astype(float)* gr_data['totalPlayers'] /(gr_data['numGroups'] + 0.01) gr_data['totalPlayersAdjusted'] = gr_data['totalPlayersAdjusted'].apply(lambda x: np.minimum(100.0, x)) gr_data['num_opponents'] = gr_data['totalPlayersAdjusted'] - gr_data['groupSize'] X = X.merge(gr_data[['num_opponents', 'totalPlayersAdjusted', 'groupSize', 'raw_groupSize', 'maxKillPlace']], on=['matchId', 'groupId']) print('group size counts:') print(X['raw_groupSize'].value_counts()) X['revives_per_groupSize'] = X['revives'] /(X['groupSize'] - 1 + 0.001) X['kills_assists_norm_both'] = X['kills_assists'].astype(np.float32)/ X['num_opponents'] / X['matchDuration'] X['killPlace_norm'] = X['killPlace']/(X['maxKillPlace'] + 0.000001) X['damageDealt_norm_both'] = X['damageDealt'].astype(np.float32)/ X['num_opponents'] / X['matchDuration'] X['DBNOs_norm'] = X['DBNOs'].astype(np.float32)/ X['num_opponents'] / X['matchDuration'] X['heals_norm'] = X['heals'].astype(np.float32)/ X['matchDuration'] X['boosts_norm'] = X['boosts'].astype(np.float32)/ X['matchDuration'] X['walkDistance_norm'] = X['walkDistance'].astype(np.float32)/ X['matchDuration'] X['rideDistance_norm'] = X['rideDistance'].astype(np.float32)/ X['matchDuration'] X['swimDistance_norm'] = X['swimDistance'].astype(np.float32)/ X['matchDuration'] gr_data = reduce_mem_usage(gr_data) gr_data.drop(columns=list(set(gr_data.columns)&all_useless_cols)) return gr_data, X<groupby>
for key, dataset in data_dict.items() : dataset['Mother'] = np.where(( dataset.Title == 'Mrs')&(dataset.Parch >0),1,0) data_all = concat_data(train_data, test_data )
def create_group_and_match_stats(data, gr_data):
    group_stats_cols = ['assists', 'boosts', 'DBNOs', 'killPoints', 'longestKill', 'rankPoints',
                        'road_kills_per_rideDistance', 'kills_assists_norm_both', 'damageDealt_norm_both',
                        'DBNOs_norm', 'heals_boosts', 'assists_per_kill', 'killPlace_norm',
                        'revives', 'roadKills', 'teamKills', 'vehicleDestroys', 'weaponsAcquired',
                        'winPoints', 'headshot_rate', 'kill_streak_rate', 'kills_assists',
                        'heals_norm', 'walkDistance_norm', 'rideDistance_norm', 'swimDistance_norm',
                        'damageDealt_per_heal_boost', 'kills_assists_per_heal_boost']
    # Same columns minus 'killPlace_norm', which is only meaningful within a single match.
    match_stats_cols = [c for c in group_stats_cols if c != 'killPlace_norm']
    pl_data_grouped_by_group = data.groupby(['matchId', 'groupId'])
    pl_data_grouped_by_match = data.groupby(['matchId'])
    # 'median' was mapped to np.mean in the original; np.median matches the column names it produces.
    group_funcs = {'min': np.min, 'max': np.max, 'sum': np.sum, 'median': np.median}
    match_funcs = {'min': np.min, 'max': np.max, 'sum': np.sum, 'median': np.median, 'std': np.std}
    extra_group_stats = pl_data_grouped_by_group[['matchId', 'groupId']].first().reset_index(drop=True)
    extra_match_stats = pl_data_grouped_by_match[['matchId']].first().reset_index(drop=True)
    for colname in group_stats_cols:
        for f_name, func in group_funcs.items():
            gr_col_name = f_name + '_group_' + colname
            if (gr_col_name not in all_useless_cols) or ((gr_col_name + '_rank') not in all_useless_cols):
                if func is not np.sum:  # original compared against the builtin `sum`, so the multiplier branch never ran
                    extra_group_stats[gr_col_name] = pl_data_grouped_by_group[colname].agg(func).values
                else:
                    # sum_multipliers is assumed to be defined earlier in the kernel
                    extra_group_stats[gr_col_name] = pl_data_grouped_by_group[colname].agg(func).values * sum_multipliers
    for colname in match_stats_cols:
        for f_name, func in match_funcs.items():
            m_col_name = f_name + '_match_' + colname
            if m_col_name not in all_useless_cols:
                if func is np.std:
                    extra_match_stats[m_col_name] = pl_data_grouped_by_match[colname].agg(func).fillna(0).values
                else:
                    # the original np.min special case (keyed on min_match_useful_cols) had an
                    # identical body in both branches, so it is folded into the default here
                    extra_match_stats[m_col_name] = pl_data_grouped_by_match[colname].agg(func).values
    extra_group_stats.set_index(['matchId', 'groupId'], inplace=True)
    extra_match_stats.set_index(['matchId'], inplace=True)
    pl_data_grouped_by_group = None
    pl_data_grouped_by_match = None
    select_cols = [col for col in extra_group_stats.columns
                   if ((col + '_rank') not in all_useless_cols) and (col not in ['matchId', 'groupId'])]
    # Per-match percentile ranks of the group statistics, scaled to [0, 1).
    rank_data = extra_group_stats.groupby(['matchId'])[select_cols].rank() - 1
    gc.collect()
    max_rank_data = rank_data.groupby(['matchId']).transform(np.max)
    rank_data = rank_data / (max_rank_data + 0.0001)
    max_rank_data = None
    gc.collect()
    print('rank data created')
    gr_col_to_drop = list(set(extra_group_stats.columns) & all_useless_cols)
    extra_group_stats.drop(columns=gr_col_to_drop, inplace=True)
    gc.collect()
    extra_group_stats = extra_group_stats.join(rank_data, on=['matchId', 'groupId'], rsuffix='_rank')
    extra_group_stats.reset_index(level=1, inplace=True)
    rank_data = None
    gc.collect()
    print('rank data merged')
    extra_group_stats = reduce_mem_usage(extra_group_stats)
    extra_match_stats = reduce_mem_usage(extra_match_stats)
    merged_features = extra_group_stats.merge(extra_match_stats, on=['matchId'])
    extra_group_stats = None
    extra_match_stats = None
    gc.collect()
    print('extra match and group stats merged')
    merged_features = merged_features.merge(gr_data, on=['matchId', 'groupId'])
    gr_data = None
    gc.collect()
    print('group data and stats merged')
    # One-hot encode matchType; a sorted list keeps the column order deterministic
    # (passing a raw set as DataFrame columns is rejected by pandas).
    cats = sorted(set(merged_features['matchType'].unique()) - set(all_useless_cols))
    encoded_data = np.empty(shape=(merged_features.shape[0], 0), dtype=np.int8)
    for category in cats:
        encoded_data = np.c_[encoded_data,
                             (merged_features[['matchType']] == category).values.reshape(-1, 1).astype(np.int8)]
    encoded_data = pd.DataFrame(encoded_data, columns=cats, index=merged_features.index, dtype=np.int8)
    print('matchType data created')
    for col in encoded_data.columns:
        merged_features[col] = encoded_data[col]
    encoded_data = None
    gc.collect()
    print('match type data merged')
    merged_features = merged_features.drop(columns=['matchType'])
    return merged_features<filter>
for key, dataset in data_dict.items():
    dataset['Ticket Frequency'] = data_all.groupby('Ticket')['Ticket'].transform('count')
data_all = concat_data(train_data, test_data)
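The groupby/transform('count') pattern above maps each ticket to the number of passengers sharing it, aligned back to the original rows rather than collapsed to one row per group. A minimal self-contained sketch on toy data (not the competition files):

import pandas as pd

toy = pd.DataFrame({'Ticket': ['A1', 'A1', 'B2']})
# transform keeps the original index, so the count is broadcast to every row of its group
toy['Ticket Frequency'] = toy.groupby('Ticket')['Ticket'].transform('count')
print(toy)
#   Ticket  Ticket Frequency
# 0     A1                 2
# 1     A1                 2
# 2     B2                 1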
Titanic - Machine Learning from Disaster
10,757,188
def remove_outliers(X):
    outliers = ((X['walkDistance'] > 10000) | (X['rideDistance'] > 15000) |
                (X['swimDistance'] > 1000) | ((X['kills'] > 0) & (X['total_distance'] == 0)))
    outliers = outliers | (X['kills'] > 30) | (X['longestKill'] > 800) | (X['weaponsAcquired'] > 40)
    # Capture the outlier rows before dropping them; the original filtered X first,
    # which made outlier_data always empty.
    outlier_data = X.loc[outliers]
    X = X.loc[~outliers]
    return X, outlier_data<load_from_csv>
X_train = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
X_test.head()
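Calling get_dummies separately on train and test can produce mismatched columns when a category value appears in only one split. A hedged guard, reusing the X_train/X_test frames from the cell above, is to align the two frames on columns:

# Fill categories missing from one split with 0 so the model sees an identical feature layout.
X_train, X_test = X_train.align(X_test, join='left', axis=1, fill_value=0)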
Titanic - Machine Learning from Disaster
10,757,188
class DataPipeline():
    def __init__(self, pipeline=None):
        if pipeline is not None:
            self.rank_means = pipeline.rank_means
            self.rank_stds = pipeline.rank_stds

    def fit_transform(self, data_path):
        data = pd.read_csv(data_path)
        for col_name, col_type in data_types.items():
            data[col_name] = data[col_name].astype(col_type)
        data = data.loc[~data['winPlacePerc'].isna()]
        data = data.loc[data['maxPlace'] > 1]
        data, self.rank_means, self.rank_stds = fix_missing_ranks(data)
        data = add_player_features(data)
        gr_data, data = create_basic_group_info(data)
        cols_to_drop = ['kills', 'headshotKills', 'killStreaks', 'walkDistance',
                        'rideDistance', 'swimDistance', 'heals']
        data.drop(columns=cols_to_drop, inplace=True)
        data_ids = data[['Id', 'matchId', 'groupId']]
        merged_features = create_group_and_match_stats(data, gr_data)
        gc.collect()
        print('final feature dataframe shape:', merged_features.shape)
        return merged_features.reset_index(), data_ids

    def transform(self, data_path):
        data = pd.read_csv(data_path)
        for col_name, col_type in data_types.items():
            if col_name != 'winPlacePerc':
                data[col_name] = data[col_name].astype(col_type)
        data = fix_missing_ranks(data, self.rank_means, self.rank_stds)[0]
        data = add_player_features(data)
        gr_data, data = create_basic_group_info(data)
        cols_to_drop = ['kills', 'headshotKills', 'killStreaks', 'walkDistance',
                        'rideDistance', 'swimDistance', 'heals']
        data.drop(columns=cols_to_drop, inplace=True)
        data_ids = data[['Id', 'matchId', 'groupId']]
        merged_features = create_group_and_match_stats(data, gr_data)
        print('final feature dataframe shape:', merged_features.shape)
        return merged_features.reset_index(), data_ids<create_dataframe>
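The class mirrors scikit-learn's fit/transform split: rank statistics are learned once from the training file and reused unchanged on the test file, so the two are featurized consistently. A usage sketch; the CSV paths are illustrative, not taken from this cell:

pipeline = DataPipeline()
train_features, train_ids = pipeline.fit_transform('../input/train_V2.csv')  # learns rank_means / rank_stds
test_features, test_ids = pipeline.transform('../input/test_V2.csv')         # reuses the learned statistics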
y = train_data['Survived']
Titanic - Machine Learning from Disaster
10,757,188
<data_type_conversions><EOS>
model = RandomForestClassifier(n_estimators=1800, max_depth=8, min_samples_split=6,
                               min_samples_leaf=6, max_features='auto', oob_score=True,
                               random_state=42, n_jobs=-1, verbose=1)
model.fit(X_train, y)
predictions = model.predict(X_test)
acc_random_forest = round(model.score(X_train, y) * 100, 2)
print(acc_random_forest)
# The submission file must use the exact column name 'PassengerId' ('PassengerID' is rejected by Kaggle).
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output = output.convert_dtypes()
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!")
output.head()
Titanic - Machine Learning from Disaster
10,257,425
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<define_variables>
%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_style('whitegrid')
# Imports reconstructed from the truncated lines and the classifiers used below.
from sklearn.ensemble import (AdaBoostClassifier, BaggingClassifier, RandomForestClassifier,
                              ExtraTreesClassifier, GradientBoostingClassifier, VotingClassifier)
from sklearn.linear_model import (Perceptron, LogisticRegression,
                                  PassiveAggressiveClassifier, RidgeClassifier)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import BernoulliNB, GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import StratifiedKFold, cross_val_score
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier

data_raw = pd.read_csv('/kaggle/input/titanic/train.csv')
data_test = pd.read_csv('/kaggle/input/titanic/test.csv')
train = data_raw.copy(deep=True)
data_all = [train, data_test]
Titanic - Machine Learning from Disaster
10,257,425
def get_data_batch(batch_size, randomize=False):
    if randomize:
        curr_idx = np.random.permutation(train.shape[0])
    else:
        curr_idx = range(train.shape[0])
    for batch_n in range(int(np.ceil(train.shape[0] / batch_size))):
        batch_data = train.iloc[curr_idx[batch_n * batch_size:(batch_n + 1) * batch_size]]
        # undo the feature scaling on groupSize so it can weight the loss
        group_sizes = batch_data['groupSize'].values * std_vals['groupSize'] + mean_vals['groupSize']
        yield (batch_data.drop(columns=['winPlacePerc', 'matchId', 'groupId']).values,
               batch_data['winPlacePerc'].values,
               group_sizes)<categorify>
for dataset in data_all:
    dataset.drop(['Ticket', 'Cabin'], axis=1, inplace=True)
    dataset['Fare'].fillna(dataset['Fare'].median(), inplace=True)
    dataset['Embarked'].fillna(dataset['Embarked'].mode()[0], inplace=True)
    for c in set(dataset['Pclass']):
        for s in set(dataset['Sex']):
            age_median = dataset[(dataset['Pclass'] == c) & (dataset['Sex'] == s)]['Age'].median()
            dataset.loc[(dataset['Age'].isnull()) & (dataset['Pclass'] == c) & (dataset['Sex'] == s), 'Age'] = age_median
train.drop(['PassengerId'], axis=1, inplace=True)

def get_titles(series):
    return series.str.extract(r'([a-zA-Z]+)\.', expand=False)

for dataset in data_all:
    dataset['FamilySize'] = dataset['Parch'] + dataset['SibSp'] + 1
    dataset['IsAlone'] = (dataset['FamilySize'] == 1).astype(int)
    dataset['Title'] = get_titles(dataset['Name'])
    title_counts = dataset['Title'].value_counts()
    dataset['Title'] = dataset['Title'].map(lambda t: t if title_counts[t] >= 10 else 'Rare')
    dataset.drop('Name', axis=1, inplace=True)
    dataset['FareBin'] = pd.qcut(dataset['Fare'], 4)
    dataset['AgeBin'] = pd.cut(dataset['Age'], 5)

categorical_cols = ['Sex', 'Embarked', 'Title', 'AgeBin', 'FareBin']
numerical_cols = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'FamilySize', 'IsAlone']
le = LabelEncoder()
ohe = OneHotEncoder(sparse=False)
for dataset in data_all:
    for col in categorical_cols:
        dataset[col + '_Code'] = le.fit_transform(dataset[col])
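The two binning calls differ in where they cut: pd.qcut splits Fare at quantiles, giving roughly equal counts per bin, while pd.cut splits Age into equal-width intervals. A toy illustration, independent of the Titanic data:

import pandas as pd

vals = pd.Series([1, 2, 3, 4, 100])
print(pd.qcut(vals, 2).value_counts())  # ~equal membership: 2-3 values per bin
print(pd.cut(vals, 2).value_counts())   # equal width: the 100 outlier sits alone in the top bin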
Titanic - Machine Learning from Disaster
10,257,425
dropout_rate = [0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5]

def leaky_relu(z, name=None):
    return tf.maximum(0.01 * z, z, name=name)

tf.reset_default_graph()
dynamic_dropout = tf.placeholder_with_default(0.5, shape=(None), name='dynamic_dropout')
X = tf.placeholder(dtype=tf.float32, shape=[None, train.shape[1] - 3], name='X')
y = tf.placeholder(dtype=tf.float32, shape=(None), name='y')
group_sizes = tf.placeholder(dtype=tf.float32, shape=(None), name='group_sizes')
training = tf.placeholder_with_default(False, shape=(None), name='training')

with tf.name_scope('layers') as scope:
    # Equivalent to the original's five hand-written dense/BN/elu blocks (hidden1..hidden5).
    layer_sizes = [100, 60, 60, 60, 60]
    hidden = X
    for i, size in enumerate(layer_sizes, start=1):
        hidden = tf.layers.dense(hidden, size, name=f'hidden{i}',
                                 kernel_initializer=tf.variance_scaling_initializer())
        hidden = tf.layers.batch_normalization(hidden, training=training, momentum=0.9, name=f'hidden{i}_bn')
        hidden = tf.nn.elu(hidden, name=f'hidden{i}_activation')
    hidden = tf.layers.dropout(hidden, rate=dropout_rate[5], training=training, name='dropout5')
    output = tf.layers.dense(hidden, 1, name='output',
                             kernel_initializer=tf.variance_scaling_initializer())<compute_train_metric>
train = train.dropna(axis=0, subset=['Survived'])
targ_col = 'Survived'
feature_cols = ['Pclass', 'FamilySize', 'IsAlone', 'Sex_Code', 'Embarked_Code',
                'Title_Code', 'AgeBin_Code', 'FareBin_Code']
Titanic - Machine Learning from Disaster
10,257,425
with tf.name_scope('loss') as scope:
    diff_vector = tf.abs(tf.reshape(output, shape=[-1]) - y)
    sum_group_sizes = tf.reduce_sum(group_sizes)
    loss_mse = tf.reduce_sum(tf.square(diff_vector) * group_sizes) / sum_group_sizes
    loss_mae = tf.reduce_sum(diff_vector * group_sizes) / sum_group_sizes
    # quadratic for errors inside [0, 1], linear beyond it
    clipped_diff_vector = tf.clip_by_value(diff_vector, 0.0, 1.0)
    lin_err = 2 * (diff_vector - clipped_diff_vector)
    loss_hybrid = tf.reduce_sum((tf.square(clipped_diff_vector) + lin_err) * group_sizes) / sum_group_sizes

with tf.name_scope('training') as scope:
    lr_high = 0.0008
    lr_low = 0.00001
    learning_rate = tf.Variable(0.0001, trainable=False, name='learning_rate')
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.6)
    training_op = optimizer.minimize(loss_hybrid)

val_batch_size = 10000

def get_mse_mae(sess, batch_size=10000):
    y_diffs = np.empty(shape=0)
    curr_group_sizes = np.empty(shape=0)
    for X_batch, y_batch, batch_gr_sizes in get_data_batch(batch_size=batch_size):
        curr_output_vals = sess.run(output, feed_dict={X: X_batch}).reshape(-1)
        y_diffs = np.r_[y_diffs, np.abs(y_batch - curr_output_vals)]
        curr_group_sizes = np.r_[curr_group_sizes, batch_gr_sizes]
    loss_mae_val = np.sum(y_diffs * curr_group_sizes) / np.sum(curr_group_sizes)
    loss_mse_val = np.sum((y_diffs ** 2) * curr_group_sizes) / np.sum(curr_group_sizes)
    return loss_mse_val, loss_mae_val

extra_training_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
init = tf.global_variables_initializer()
saver = tf.train.Saver()<init_hyperparams>
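The hybrid loss is a Huber-style compromise: with e = |prediction - target|, it equals e**2 for e <= 1 and 2*(e - 1) + 1 beyond, so it is continuous at e = 1 and grows only linearly for gross errors. A quick numeric check of the same formula in plain NumPy:

import numpy as np

e = np.array([0.5, 1.0, 3.0])
clipped = np.clip(e, 0.0, 1.0)
hybrid = clipped ** 2 + 2 * (e - clipped)  # mirrors square(clipped) + lin_err above
print(hybrid)  # [0.25 1.   5.] -> e**2 below 1, then linear with slope 2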
classifiers = [
    KNeighborsClassifier(**{'n_jobs': -1, 'n_neighbors': 7}),
    SVC(**{'C': 0.390625, 'kernel': 'poly', 'probability': True, 'random_state': 0}),
    LinearSVC(**{'C': 25.0, 'dual': False, 'loss': 'squared_hinge', 'max_iter': 25000, 'penalty': 'l1', 'random_state': 0}),
    DecisionTreeClassifier(**{'criterion': 'entropy', 'max_depth': 10, 'min_samples_split': 8, 'random_state': 0}),
    RandomForestClassifier(**{'criterion': 'entropy', 'max_depth': 12, 'min_samples_split': 8, 'n_estimators': 300, 'n_jobs': -1, 'random_state': 0}),
    AdaBoostClassifier(**{'algorithm': 'SAMME', 'learning_rate': 0.3, 'n_estimators': 300, 'random_state': 0}),
    BaggingClassifier(**{'max_samples': 0.1, 'n_estimators': 50, 'n_jobs': -1, 'random_state': 0}),
    ExtraTreesClassifier(**{'criterion': 'gini', 'max_depth': 12, 'min_samples_split': 12, 'n_estimators': 200, 'n_jobs': -1, 'random_state': 0}),
    GradientBoostingClassifier(**{'learning_rate': 0.3, 'max_depth': 2, 'min_samples_split': 2, 'n_estimators': 10, 'random_state': 0}),
    BernoulliNB(**{'alpha': 0.1}),
    GaussianNB(),
    LinearDiscriminantAnalysis(**{'shrinkage': None, 'solver': 'svd'}),
    QuadraticDiscriminantAnalysis(),
    Perceptron(**{'early_stopping': True, 'n_jobs': -1, 'penalty': 'l2', 'random_state': 0}),
    LogisticRegression(**{'C': 0.09765625, 'max_iter': 20, 'n_jobs': -1, 'penalty': 'l2', 'random_state': 0, 'solver': 'lbfgs'}),
    PassiveAggressiveClassifier(**{'C': 0.01220703125, 'early_stopping': True, 'loss': 'hinge', 'max_iter': 5, 'n_jobs': -1, 'random_state': 0}),
    RidgeClassifier(**{'alpha': 0.5, 'normalize': True, 'random_state': 0}),
    GaussianProcessClassifier(**{'max_iter_predict': 10, 'n_jobs': -1, 'random_state': 0}),
    XGBClassifier(**{'booster': 'gbtree', 'learning_rate': 0.1, 'max_depth': 10, 'n_jobs': -1, 'random_state': 0, 'reg_alpha': 0.16, 'reg_lambda': 2.56}),
    LGBMClassifier(**{'boosting_type': 'goss', 'learning_rate': 0.1, 'n_estimators': 1000, 'n_jobs': -1, 'random_state': 0, 'reg_alpha': 0.16, 'reg_lambda': 0}),
]
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
Titanic - Machine Learning from Disaster
10,257,425
gc.collect()
batch_size = 5000
n_epochs = 1000
max_epochs_wo_improvement = 50
max_epochs_wo_lr_change = 10
max_time = 2  # hours
loss_data = {'mse_train': [], 'mae_train': [], 'lr': []}
start_time = datetime.datetime.now()

def get_dropout(initial_dropout, epoch):
    return np.maximum(0, initial_dropout * (1 - epoch / 200))

with tf.Session() as sess:
    init.run()
    min_loss_mae = np.inf
    epochs_wo_improvement = 0
    epochs_since_lr_change = 0
    batch_lr = lr_high
    for epoch in range(n_epochs):
        for X_batch, y_batch, batch_gr_sizes in get_data_batch(batch_size=batch_size, randomize=True):
            feed_dict = {X: X_batch, y: y_batch, training: True,
                         learning_rate: batch_lr, group_sizes: batch_gr_sizes}
            sess.run([training_op, extra_training_ops], feed_dict=feed_dict)
        loss_mse_train, loss_mae_train = get_mse_mae(sess)
        loss_data['mse_train'].append(loss_mse_train)
        loss_data['mae_train'].append(loss_mae_train)
        loss_data['lr'].append(batch_lr)
        print('epoch', epoch, 'time passed:', (datetime.datetime.now() - start_time),
              'learning rate: {:<5.5f}'.format(batch_lr))
        print('MSE:', loss_mse_train, 'MAE:', loss_mae_train)
        if loss_mae_train <= min_loss_mae:
            epochs_wo_improvement = 0
            epochs_since_lr_change = 0
            min_loss_mae = loss_mae_train
            save_path = saver.save(sess, '../DNN_data/dnn_state.ckpt')
            print('- best so far!')
        else:
            epochs_wo_improvement += 1
            epochs_since_lr_change += 1
        if (epochs_wo_improvement > max_epochs_wo_improvement) or \
           ((datetime.datetime.now() - start_time).seconds > max_time * 60 * 60):
            print('early breaking!')
            break
        if epochs_since_lr_change >= max_epochs_wo_lr_change:
            batch_lr = batch_lr - (batch_lr - lr_low) * 0.3
            epochs_since_lr_change = 0
    saver.restore(sess, save_path)
    loss_mse_val, loss_mae_val = get_mse_mae(sess)
    print('Final MSE:', loss_mse_val, 'final MAE:', loss_mae_val)<drop_column>
clf_scores = pd.DataFrame(columns=['Classifier', 'Test Score', 'Test Score 3*STD'])
clf_preds = pd.DataFrame(train[targ_col])
for i, clf in enumerate(classifiers):
    clf_name = clf.__class__.__name__
    clf.fit(train[feature_cols], train[targ_col])
    cv_results = cross_val_score(clf, train[feature_cols], train[targ_col], cv=skf)
    clf_scores.loc[i] = [clf_name, cv_results.mean(), cv_results.std() * 3]
    clf_preds[clf_name] = clf.predict(train[feature_cols])
clf_scores
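To read the resulting table quickly, the frame can be sorted by mean cross-validated score; a one-line sketch using the clf_scores columns defined above:

# Highest cross-validated accuracy first.
clf_scores.sort_values('Test Score', ascending=False).head(10)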
Titanic - Machine Learning from Disaster
10,257,425
group_cols = [col for col in train.columns if re.search(r'group', col) is not None]
group_cols = [col for col in group_cols if re.search(r'_rank', col) is None]
group_cols.remove('groupId')
other_cols = [col for col in train.columns if re.search(r'match|group', col) is None]
other_cols.remove('num_opponents')
other_cols.remove('winPlacePerc')<data_type_conversions>
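The same select-by-regex trick on a toy column list, for clarity (pandas also offers df.filter(regex=...) for this kind of selection):

import re

cols = ['max_group_kills', 'max_group_kills_rank', 'groupId', 'matchId', 'boosts']
# keep 'group' columns that are not rank features
group_like = [c for c in cols if re.search(r'group', c) and not re.search(r'_rank', c)]
print(group_like)  # ['max_group_kills', 'groupId']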
def print_scores_info(model_name, scores):
    mean = scores.mean() * 100
    std_3 = scores.std() * 100 * 3
    print(model_name, 'score mean: ', mean)
    print(model_name, 'score 3 std range: ', mean - std_3, '—', mean + std_3)
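A usage sketch for the helper above, reusing skf, train, feature_cols and targ_col from the earlier cells (the LogisticRegression settings here are illustrative, not the tuned ones):

scores = cross_val_score(LogisticRegression(max_iter=200), train[feature_cols], train[targ_col], cv=skf)
print_scores_info('LogReg', scores)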
Titanic - Machine Learning from Disaster
10,257,425
<define_search_space><EOS>
vote_classifiers = [
    ('knn', KNeighborsClassifier(**{'n_jobs': -1, 'n_neighbors': 7})),
    ('bag', BaggingClassifier(**{'max_samples': 0.1, 'n_estimators': 50, 'n_jobs': -1, 'random_state': 0})),
    ('gbc', GradientBoostingClassifier(**{'learning_rate': 0.3, 'max_depth': 2, 'min_samples_split': 2, 'n_estimators': 10, 'random_state': 0})),
    ('bnb', BernoulliNB(**{'alpha': 0.1})),
    ('gnb', GaussianNB()),
    ('lda', LinearDiscriminantAnalysis(**{'shrinkage': None, 'solver': 'svd'})),
    ('qda', QuadraticDiscriminantAnalysis()),
    ('log', LogisticRegression(**{'C': 0.09765625, 'max_iter': 20, 'n_jobs': -1, 'penalty': 'l2', 'random_state': 0, 'solver': 'lbfgs'})),
    ('gpc', GaussianProcessClassifier(**{'max_iter_predict': 10, 'n_jobs': -1, 'random_state': 0})),
    ('xgb', XGBClassifier(**{'booster': 'gbtree', 'learning_rate': 0.1, 'max_depth': 10, 'n_jobs': -1, 'random_state': 0, 'reg_alpha': 0.16, 'reg_lambda': 2.56})),
]
vote_hard_clf = VotingClassifier(vote_classifiers, voting='hard')
print_scores_info('Vote Hard', cross_val_score(vote_hard_clf, train[feature_cols], train[targ_col], cv=skf))
vote_hard_clf.fit(train[feature_cols], train[targ_col])
preds_test = vote_hard_clf.predict(data_test[feature_cols])
output = pd.DataFrame({'PassengerId': data_test.PassengerId, 'Survived': preds_test})
output.to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster
2,403,150
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<drop_column>
import warnings

pd.options.mode.chained_assignment = None
warnings.filterwarnings('ignore')
Titanic - Machine Learning from Disaster
2,403,150
train_array = train.drop(columns=['matchId', 'groupId', 'winPlacePerc']).values<define_variables>
train = pd.read_csv('../input/train.csv', header=0)
test = pd.read_csv('../input/test.csv', header=0)
test.insert(1, 'Survived', np.nan)
# renamed from `all`, which would shadow the Python builtin (rename applied consistently below)
all_data = pd.concat([train, test])
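Concatenating train and test keeps feature encodings consistent across both files; the two frames can later be recovered from the missing target, as in this sketch using the all_data name from above:

# Rows with a known Survived value came from train; NaNs came from test.
train_part = all_data[all_data['Survived'].notna()]
test_part = all_data[all_data['Survived'].isna()].drop(columns=['Survived'])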
Titanic - Machine Learning from Disaster
2,403,150
group_col_inds = [train.columns.drop(['matchId', 'groupId', 'winPlacePerc']).get_loc(col) for col in group_cols]
other_col_inds = [train.columns.drop(['matchId', 'groupId', 'winPlacePerc']).get_loc(col) for col in other_cols]

def get_comp_batch(inds, batch_size, omit_last=True):
    rand_inds = np.random.permutation(inds.shape[0])
    func = np.floor if omit_last else np.ceil
    for batch_n in range(int(func(inds.shape[0] / batch_size))):
        batch_inds = inds[rand_inds[batch_n * batch_size:(batch_n + 1) * batch_size]]
        X1_batch = train_array[batch_inds[:, 0]][:, group_col_inds]
        X2_batch = train_array[batch_inds[:, 1]][:, group_col_inds]
        X3_batch = train_array[batch_inds[:, 0]][:, other_col_inds]
        yield X1_batch, X2_batch, X3_batch<categorify>
all_data['Title'] = all_data.Name.str.extract(r'([A-Za-z]+)\.', expand=False)
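The pattern grabs the word that immediately precedes a period, i.e. the honorific embedded in each name. A toy demonstration on made-up rows:

import pandas as pd

names = pd.Series(['Braund, Mr. Owen Harris', 'Heikkinen, Miss. Laina'])
print(names.str.extract(r'([A-Za-z]+)\.', expand=False).tolist())  # ['Mr', 'Miss']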
Titanic - Machine Learning from Disaster
2,403,150
dropout_rate = [0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.2, 0.2]
layer_sizes = [100, 100, 60, 60, 60, 30, 30, 30]
tf.reset_default_graph()

def leaky_relu(z, name=None):
    return tf.maximum(0.01 * z, z, name=name)

X1 = tf.placeholder(dtype=tf.float32, shape=[None, len(group_cols)], name='X1')
X2 = tf.placeholder(dtype=tf.float32, shape=[None, len(group_cols)], name='X2')
X3 = tf.placeholder(dtype=tf.float32, shape=[None, len(other_cols)], name='X3')
# Feed both orderings of the two compared groups so the comparator stays symmetric.
X = tf.concat([tf.concat([X1, X2, X3], axis=1), tf.concat([X2, X1, X3], axis=1)], axis=0)
y = tf.placeholder(dtype=tf.float32, shape=(None), name='y')
y_all = tf.concat([y, 1 - y], axis=0)
training = tf.placeholder_with_default(False, shape=(None), name='training')

with tf.name_scope('layers') as scope:
    hidden = tf.layers.dropout(X, rate=dropout_rate[0], training=training, name='input_dropout')
    for i in range(8):
        hidden = tf.layers.dense(hidden, layer_sizes[i], name=f'hidden_{i}',
                                 kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        hidden = tf.layers.batch_normalization(hidden, training=training, momentum=0.9, name=f'hidden_{i}_bn')
        # The original applied elu and then leaky_relu back to back, apparently a leftover line;
        # a single activation per layer suffices.
        hidden = tf.nn.elu(hidden, name=f'hidden_{i}_activation')
        hidden = tf.layers.dropout(hidden, rate=dropout_rate[i], training=training, name=f'dropout_{i}')
    logits = tf.layers.dense(hidden, 1, name='logits',
                             kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    output = tf.nn.sigmoid(logits, name='output')<choose_model_class>
all_data.loc[all_data['Title'].isin(['Ms', 'Mlle']), 'Title'] = 'Miss'
all_data.loc[all_data['Title'].isin(['Mme', 'Lady', 'Dona', 'Countess']), 'Title'] = 'Mrs'
all_data.loc[all_data['Title'].isin(['Col', 'Major', 'Sir', 'Rev', 'Capt', 'Don', 'Jonkheer']), 'Title'] = 'Mr'
all_data.loc[(all_data['Title'] == 'Dr') & (all_data['Sex'] == 'male'), 'Title'] = 'Mr'
all_data.loc[(all_data['Title'] == 'Dr') & (all_data['Sex'] == 'female'), 'Title'] = 'Mrs'
Titanic - Machine Learning from Disaster
2,403,150
with tf.name_scope('loss') as scope:
    xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_all, logits=tf.reshape(logits, [-1]), name='xentropy')
    loss = tf.reduce_mean(xentropy)

with tf.name_scope('training') as scope:
    lr_low = 0.00001
    lr_high = 0.003
    lr_high_2 = 0.0003
    decay_rate = 15
    lr = tf.Variable(lr_high, trainable=False, name='learning_rate')
    optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-10)
    training_op = optimizer.minimize(loss)

with tf.name_scope('evaluation') as scope:
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(tf.reshape(output, [-1])), y_all), tf.float32), name='accuracy')

def get_full_set_accuracy(sess, inds, batch_size=1000):
    y_batch = np.ones(shape=[batch_size])
    y_for_comp = np.r_[y_batch, 1 - y_batch]
    acc_data = np.empty(shape=0)
    for X1_batch, X2_batch, X3_batch in get_comp_batch(inds=inds, batch_size=batch_size):
        feed_dict = {X1: X1_batch, X2: X2_batch, X3: X3_batch, y: y_batch}
        y_pred = sess.run(output, feed_dict=feed_dict).reshape(-1)
        batch_acc_data = ((y_pred > 0.5).astype(np.float32) == y_for_comp).astype(np.float32)
        acc_data = np.r_[acc_data, batch_acc_data]
    return np.mean(acc_data)

extra_training_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
init = tf.global_variables_initializer()
saver_comp = tf.train.Saver()<define_variables>
all_data['FamSize'] = all_data.apply(lambda s: 1 + s['SibSp'] + s['Parch'], axis=1)
all_data['isAlone'] = all_data.apply(lambda s: 1 if s['FamSize'] == 1 else 0, axis=1)
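Row-wise apply works but is slow on large frames; a behavior-equivalent vectorized sketch of the same two features, using the all_data name from above:

# Same results as the apply version, computed column-wise.
all_data['FamSize'] = all_data['SibSp'] + all_data['Parch'] + 1
all_data['isAlone'] = (all_data['FamSize'] == 1).astype(int)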
Titanic - Machine Learning from Disaster