kernel_id (int64) | prompt (string) | completion (string) | comp_name (string) |
|---|---|---|---|
467,142 | train.drop(train[train['roadKills'] > 10].index, inplace=True)
<drop_column> | train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
467,142 | train.drop(train[train['kills'] > 30].index, inplace=True )<drop_column> | train['calculated_fare'] = train.Fare/train.family_size
test['calculated_fare'] = test.Fare/test.family_size | Titanic - Machine Learning from Disaster |
467,142 | train.drop(train[train['longestKill'] >= 1000].index, inplace=True )<drop_column> | def fare_group(fare):
a= ''
if fare <= 4:
a = 'Very_low'
elif fare <= 10:
a = 'low'
elif fare <= 20:
a = 'mid'
elif fare <= 45:
a = 'high'
else:
a = "very_high"
return a
train['fare_group'] = train['calculated_fare'].map(fare_group)
test['fare_group'] = test['calculated_fare'].map(fare_group)
| Titanic - Machine Learning from Disaster |
467,142 | train.drop(train[train['walkDistance'] >= 10000].index, inplace=True )<drop_column> | train.drop(['PassengerId'], axis=1, inplace=True)
test.drop(['PassengerId'], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
467,142 | train.drop(train[train['rideDistance'] >= 20000].index, inplace=True )<filter> | train = pd.get_dummies(train, columns=['title',"Pclass", 'Cabin','Embarked','nLength_group', 'family_group', 'fare_group'], drop_first=False)
test = pd.get_dummies(test, columns=['title',"Pclass",'Cabin','Embarked','nLength_group', 'family_group', 'fare_group'], drop_first=False)
train.drop(['family_size','Name', 'Fare','name_length'], axis=1, inplace=True)
test.drop(['Name','family_size',"Fare",'name_length'], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
467,142 | train[train['swimDistance'] >= 2000]<drop_column> | train = pd.concat([train[["Survived", "Age", "Sex","SibSp","Parch"]], train.loc[:,"is_alone":]], axis=1)
test = pd.concat([test[["Age", "Sex"]], test.loc[:,"SibSp":]], axis=1 ) | Titanic - Machine Learning from Disaster |
467,142 | train.drop(train[train['swimDistance'] >= 2000].index, inplace=True )<drop_column> | def completing_age(df):
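# impute missing Age values with a random-forest regression on the remaining features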
age_df = df.loc[:,"Age":]
temp_train = age_df.loc[age_df.Age.notnull() ]
temp_test = age_df.loc[age_df.Age.isnull() ]
y = temp_train.Age.values
x = temp_train.loc[:, "Sex":].values
rfr = RandomForestRegressor(n_estimators=1500, n_jobs=-1)
rfr.fit(x, y)
predicted_age = rfr.predict(temp_test.loc[:, "Sex":])
df.loc[df.Age.isnull() , "Age"] = predicted_age
return df
completing_age(train)
completing_age(test); | Titanic - Machine Learning from Disaster |
467,142 | train.drop(train[train['weaponsAcquired'] >= 80].index, inplace=True )<drop_column> | def age_group_fun(age):
a = ''
if age <= 1:
a = 'infant'
elif age <= 4:
a = 'toddler'
elif age <= 13:
a = 'child'
elif age <= 18:
a = 'teenager'
elif age <= 35:
a = 'Young_Adult'
elif age <= 45:
a = 'adult'
elif age <= 55:
a = 'middle_aged'
elif age <= 65:
a = 'senior_citizen'
else:
a = 'old'
return a
train['age_group'] = train['Age'].map(age_group_fun)
test['age_group'] = test['Age'].map(age_group_fun)
train = pd.get_dummies(train,columns=['age_group'], drop_first=True)
test = pd.get_dummies(test,columns=['age_group'], drop_first=True); | Titanic - Machine Learning from Disaster |
467,142 | train.drop(train[train['heals'] >= 40].index, inplace=True )<count_unique_values> | X = train.drop(['Survived'], axis = 1)
y = train["Survived"] | Titanic - Machine Learning from Disaster |
467,142 | print('There are {} different Match types in the dataset.'.format(train['matchType'].nunique()))<categorify> | X_train, X_test, y_train, y_test = train_test_split(X, y,test_size =.33, random_state=0 ) | Titanic - Machine Learning from Disaster |
467,142 | train = pd.get_dummies(train, columns=['matchType'])
matchType_encoding = train.filter(regex='matchType')
matchType_encoding.head()<data_type_conversions> | std_scale = StandardScaler()
X_train = std_scale.fit_transform(X_train)
X_test = std_scale.transform(X_test)
| Titanic - Machine Learning from Disaster |
467,142 | train['groupId'] = train['groupId'].astype('category')
train['matchId'] = train['matchId'].astype('category')
train['groupId_cat'] = train['groupId'].cat.codes
train['matchId_cat'] = train['matchId'].cat.codes
train.drop(columns=['groupId', 'matchId'], inplace=True)
train[['groupId_cat', 'matchId_cat']].head()<drop_column> | logreg = LogisticRegression(solver='liblinear',
penalty= 'l1',random_state = 42
)
logreg.fit(X_train,y_train)
y_pred = logreg.predict(X_test)
| Titanic - Machine Learning from Disaster |
467,142 | train.drop(columns = ['Id'], inplace=True )<feature_engineering> | pd.DataFrame(confusion_matrix(y_test,y_pred),\
columns=["Predicted Not-Survived", "Predicted Survived"],\
index=["Not-Survived","Survived"] ) | Titanic - Machine Learning from Disaster |
467,142 | test['totalDistance'] = test['rideDistance'] + test['walkDistance'] + test['swimDistance']
test['healsAndBoosts'] = test['heals'] + test['boosts']
test['team'] = [1 if i > 50 else 2 if 25 < i <= 50 else 4 for i in test['numGroups']]
test['playersJoined'] = test.groupby('matchId')['matchId'].transform('count')
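# normalization: matches with fewer than 100 players scale kill/damage stats up by (100 - playersJoined)/100 + 1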
test['killsNorm'] = test['kills']*(( 100-test['playersJoined'])/100 + 1)
test['damageDealtNorm'] = test['damageDealt']*(( 100-test['playersJoined'])/100 + 1)
test['maxPlaceNorm'] = test['maxPlace']*(( 100-test['playersJoined'])/100 + 1)
test['matchDurationNorm'] = test['matchDuration']*(( 100-test['playersJoined'])/100 + 1)
test['headshot_rate'] = test['headshotKills'] / test['kills']
test['headshot_rate'] = test['headshot_rate'].fillna(0)
test['killsWithoutMoving'] =(( test['kills'] > 0)&(test['totalDistance'] == 0))
test = pd.get_dummies(test, columns=['matchType'])
test['groupId'] = test['groupId'].astype('category')
test['matchId'] = test['matchId'].astype('category')
test['groupId_cat'] = test['groupId'].cat.codes
test['matchId_cat'] = test['matchId'].cat.codes
test.drop(columns=['groupId', 'matchId'], inplace=True )<prepare_x_and_y> | y_pred = logreg.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
cm_display = ConfusionMatrixDisplay(cm ).plot() | Titanic - Machine Learning from Disaster |
467,142 | X = train_sample.drop(columns=['winPlacePerc'])
y = train_sample['winPlacePerc']<split> | accuracy_score(y_test, y_pred ) | Titanic - Machine Learning from Disaster |
467,142 | def split_vals(a, n : int):
return a[:n].copy() , a[n:].copy()
val_perc = 0.12
n_valid = int(val_perc * sample)
n_trn = len(X)- n_valid
raw_train, raw_valid = split_vals(train_sample, n_trn)
X_train, X_valid = split_vals(X, n_trn)
y_train, y_valid = split_vals(y, n_trn)
print('Sample train shape: ', X_train.shape,
'Sample target shape: ', y_train.shape,
'Sample validation shape: ', X_valid.shape )<compute_test_metric> | recall_score(y_test, y_pred ) | Titanic - Machine Learning from Disaster |
467,142 | def print_score(m : xgb):
res = ['mae train: ', mean_absolute_error(m.predict(X_train), y_train),
'mae val: ', mean_absolute_error(m.predict(X_valid), y_valid)]
if hasattr(m, 'oob_score_'): res.append(m.oob_score_)
print(res )<compute_test_metric> | precision_score(y_test, y_pred ) | Titanic - Machine Learning from Disaster |
467,142 |
<choose_model_class> | print(classification_report(y_test, y_pred)) | Titanic - Machine Learning from Disaster |
467,142 | rnd_mod_1 = RandomForestRegressor(n_estimators=40, min_samples_leaf=3, max_features='sqrt', n_jobs=-1)
<train_model> | cv = StratifiedShuffleSplit(n_splits = 10, test_size =.25, random_state = 0)
column_names = X.columns
X = std_scale.fit_transform(X)
accuracies = cross_val_score(LogisticRegression(solver='liblinear'), X,y, cv = cv)
print("Cross-Validation accuracy scores:{}".format(accuracies))
print("Mean Cross-Validation accuracy score: {}".format(round(accuracies.mean() ,5)) ) | Titanic - Machine Learning from Disaster |
467,142 | rnd_mod_1.fit(X_train, y_train)
print_score(rnd_mod_1)
<features_selection> | C_vals = [0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]
cv = StratifiedShuffleSplit(n_splits = 10, test_size =.25)
param = {'C': C_vals}
logreg = LogisticRegression()
grid = GridSearchCV(
estimator=LogisticRegression() ,
param_grid = param,
scoring = 'accuracy',
n_jobs =-1,
cv = cv
)
grid.fit(X, y ) | Titanic - Machine Learning from Disaster |
467,142 | to_keep = fi[fi.imp>0.005].cols
print('Significant features: ', len(to_keep))
to_keep<split> | print(grid.best_score_)
print(grid.best_params_)
print(grid.best_estimator_)
| Titanic - Machine Learning from Disaster |
467,142 | X_keep = X[to_keep].copy()
X_train, X_valid = split_vals(X_keep, n_trn )<train_model> | logreg_grid = grid.best_estimator_
logreg_grid.score(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | rnd_mod_2.fit(X_train, y_train)
print_score(rnd_mod_2 )<split> | knn = KNeighborsClassifier(metric='minkowski', p=2)
cv = StratifiedShuffleSplit(n_splits=10, test_size=.25, random_state=2)
accuracies = cross_val_score(knn, X,y, cv = cv, scoring='accuracy')
print("Cross-Validation accuracy scores:{}".format(accuracies))
print("Mean Cross-Validation accuracy score: {}".format(round(accuracies.mean() ,3)) ) | Titanic - Machine Learning from Disaster |
467,142 | val_perc_full = 0.12
n_valid_full = int(val_perc_full * len(train))
n_trn_full = len(train)-n_valid_full
X_full = train.drop(columns = ['winPlacePerc'])
y = train['winPlacePerc']
X_full = X_full[to_keep]
X_train, X_valid = split_vals(X_full, n_trn_full)
y_train, y_valid = split_vals(y, n_trn_full)
print('Sample train shape: ', X_train.shape,
'Sample target shape: ', y_train.shape,
'Sample validation shape: ', X_valid.shape )<train_model> | k_range = range(1,31)
k_scores = []
for k in k_range:
knn = KNeighborsClassifier(n_neighbors=k)
scores = cross_val_score(knn, X,y, cv = cv, scoring = 'accuracy')
k_scores.append(scores.mean())
print("Accuracy scores are: {}\n".format(k_scores))
print("Mean accuracy score: {}".format(np.mean(k_scores)))
| Titanic - Machine Learning from Disaster |
467,142 | rnd_mod_3.fit(X_train, y_train)
print_score(rnd_mod_3 )<prepare_output> | k_range = range(1,31)
weights_options=['uniform','distance']
param = {'n_neighbors':k_range, 'weights':weights_options}
cv = StratifiedShuffleSplit(n_splits=10, test_size=.30, random_state=15)
grid = GridSearchCV(KNeighborsClassifier() , param,cv=cv,verbose = False, n_jobs=-1)
grid.fit(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | test_pred = test[to_keep].copy()
test_pred.fillna(0, inplace=True)
test_pred.head()<save_to_csv> | print(grid.best_score_)
print(grid.best_params_)
print(grid.best_estimator_ ) | Titanic - Machine Learning from Disaster |
467,142 | predictions = np.clip(a = rnd_mod_3.predict(test_pred), a_min = 0.0, a_max = 1.0)
pred_df = pd.DataFrame({'Id' : test['Id'], 'winPlacePerc' : predictions})
pred_df.to_csv("submission.csv", index=False )<string_transform> | knn_grid= grid.best_estimator_
knn_grid.score(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | def take_part_of_data(df, part):
match_ids = df['matchId'].unique()
match_ids_part = np.random.choice(match_ids, int(part * len(match_ids)))
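# note: np.random.choice samples with replacement by default, so slightly fewer unique matches may be kept than part * len(match_ids)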
df = df[df['matchId'].isin(match_ids_part)]
del match_ids
del match_ids_part<feature_engineering> | k_range = range(1,31)
weights_options=['uniform','distance']
param = {'n_neighbors':k_range, 'weights':weights_options}
cv = StratifiedShuffleSplit(n_splits=10, test_size=.30)
grid = RandomizedSearchCV(KNeighborsClassifier() , param,cv=cv,verbose = False, n_jobs=-1, n_iter=40)
grid.fit(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | def add_new_features_1(df):
df['totalDistance'] = df['rideDistance'] + df['walkDistance'] + df['swimDistance']
df['healsAndBoosts'] = df['heals'] + df['boosts']
df['headshotKillsOverKills'] = df['headshotKills'] / df['kills']
df['headshotKillsOverKills'].fillna(0, inplace=True)
df['killStreaksOverKills'] = df['killStreaks'] / df['kills']
df['killStreaksOverKills'].fillna(0, inplace=True)
df['killsAndAssists'] = df['kills'] + df['assists']
df['assistsAndRevives'] = df['assists'] + df['revives']<feature_engineering> | print(grid.best_score_)
print(grid.best_params_)
print(grid.best_estimator_ ) | Titanic - Machine Learning from Disaster |
467,142 | def add_new_features_2(df):
df['playersJoined'] = df.groupby('matchId')['matchId'].transform('count')
df['killsAndAssistsOverPlayersJoined'] = df['killsAndAssists'] *(( 100 - df['playersJoined'])/ 100 + 1)
df['matchDurationOverPlayersJoined'] = df['matchDuration'] *(( 100 - df['playersJoined'])/ 100 + 1)
df['damageDealtOverPlayersJoined'] = df['damageDealt'] *(( 100 - df['playersJoined'])/ 100 + 1 )<feature_engineering> | knn_ran_grid = grid.best_estimator_
knn_ran_grid.score(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | def add_new_features_3(df):
df['totalDistanceOverKillsAndAssists'] = df['totalDistance'] / df['killsAndAssists']
df['totalDistanceOverKillsAndAssists'].fillna(0, inplace=True)
df['totalDistanceOverKillsAndAssists'].replace(np.inf, 0, inplace=True)
df['totalDistanceOverHealsAndBoosts'] = df['totalDistance'] / df['healsAndBoosts']
df['totalDistanceOverHealsAndBoosts'].fillna(0, inplace=True)
df['totalDistanceOverHealsAndBoosts'].replace(np.inf, 0, inplace=True )<feature_engineering> | gaussian = GaussianNB()
gaussian.fit(X, y)
y_pred = gaussian.predict(X_test)
gaussian_accy = round(accuracy_score(y_pred, y_test), 3)
print(gaussian_accy ) | Titanic - Machine Learning from Disaster |
467,142 | def add_new_features_4(df):
df['headshotRate'] = df['kills'] / df['headshotKills']
df['killStreakRate'] = df['killStreaks'] / df['kills']
df['healsAndBoosts'] = df['heals'] + df['boosts']
df['totalDistance'] = df['rideDistance'] + df['walkDistance'] + df['swimDistance']
df['killPlaceOverMaxPlace'] = df['killPlace'] / df['maxPlace']
df['headshotKillsOverKills'] = df['headshotKills'] / df['kills']
df['distanceOverWeapons'] = df['totalDistance'] / df['weaponsAcquired']
df['walkDistanceOverHeals'] = df['walkDistance'] / df['heals']
df['walkDistanceOverKills'] = df['walkDistance'] / df['kills']
df['killsPerWalkDistance'] = df['kills'] / df['walkDistance']
df["skill"] = df['headshotKills'] + df['roadKills']
df[df == np.Inf] = np.NaN
df[df == np.NINF] = np.NaN
df.fillna(0, inplace=True )<prepare_x_and_y> | Cs = [0.001, 0.01, 0.1, 1,1.5,2,2.5,3,4,5, 10]
gammas = [0.0001,0.001, 0.01, 0.1, 1]
param_grid = {'C': Cs, 'gamma' : gammas}
cv = StratifiedShuffleSplit(n_splits=10, test_size=.30, random_state=15)
grid_search = GridSearchCV(SVC(kernel = 'rbf', probability=True), param_grid, cv=cv)
grid_search.fit(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | def feature_engineering(df, is_train=True):
df['rankPoints'] = np.where(df['rankPoints'] <= 0, 0, df['rankPoints'])
features = list(df.columns)
features.remove("matchId")
features.remove("groupId")
features.remove("matchDuration")
features.remove("matchType")
if 'winPlacePerc' in features:
features.remove('winPlacePerc')
y = None
if is_train:
y = df.groupby(['matchId','groupId'])['winPlacePerc'].agg('mean')
elif 'winPlacePerc' in df.columns:
y = df['winPlacePerc']
agg = df.groupby(['matchId','groupId'])[features].agg('mean')
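# percentile-rank each group's aggregate within its match so features are comparable across matches of different sizes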
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
if is_train:
df_out = agg.reset_index() [['matchId','groupId']]
else:
df_out = df[['matchId','groupId']]
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_mean", "_mean_rank"], how='left', on=['matchId', 'groupId'])
agg = df.groupby(['matchId','groupId'])[features].agg('max')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_max", "_max_rank"], how='left', on=['matchId', 'groupId'])
agg = df.groupby(['matchId','groupId'])[features].agg('min')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_min", "_min_rank"], how='left', on=['matchId', 'groupId'])
agg = df.groupby(['matchId','groupId'] ).size().reset_index(name='group_size')
df_out = df_out.merge(agg, how='left', on=['matchId', 'groupId'])
agg = df.groupby(['matchId'])[features].agg('mean' ).reset_index()
df_out = df_out.merge(agg, suffixes=["", "_match_mean"], how='left', on=['matchId'])
agg = df.groupby(['matchId'] ).size().reset_index(name='match_size')
df_out = df_out.merge(agg, how='left', on=['matchId'])
df_out.drop(["matchId", "groupId"], axis=1, inplace=True)
del agg, agg_rank
return df_out, y<predict_on_test> | print(grid_search.best_score_)
print(grid_search.best_params_)
print(grid_search.best_estimator_ ) | Titanic - Machine Learning from Disaster |
467,142 | class Estimator(object):
def fit(self, x_train, y_train, x_valid, y_valid):
raise NotImplementedError
def predict(self, x):
raise NotImplementedError<train_model> | svm_grid = grid_search.best_estimator_
svm_grid.score(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | class ScikitLearnEstimator(Estimator):
def __init__(self, estimator):
self.estimator = estimator
def fit(self, x_train, y_train, x_valid, y_valid):
self.estimator.fit(x_train, y_train)
def predict(self, x):
return self.estimator.predict(x )<train_model> | max_depth = range(1,30)
max_feature = [21,22,23,24,25,26,28,29,30,'auto']
criterion=["entropy", "gini"]
param = {'max_depth':max_depth,
'max_features':max_feature,
'criterion': criterion}
grid = GridSearchCV(DecisionTreeClassifier() ,
param_grid = param,
verbose=False,
cv=StratifiedShuffleSplit(n_splits=20, random_state=15),
n_jobs = -1)
grid.fit(X, y ) | Titanic - Machine Learning from Disaster |
467,142 | def fit_predict_step(estimator, x_train, y_train, train_idx, valid_idx, x_test, oof):
x_train_train = x_train[train_idx]
y_train_train = y_train[train_idx]
x_train_valid = x_train[valid_idx]
y_train_valid = y_train[valid_idx]
estimator.fit(x_train_train, y_train_train, x_train_valid, y_train_valid)
oof_part = estimator.predict(x_train_valid)
print('MAE:', mean_absolute_error(y_train_valid, oof_part))
oof[valid_idx] = oof_part
y_part = estimator.predict(x_test)
return y_part<predict_on_test> | print(grid.best_params_)
print(grid.best_score_)
print(grid.best_estimator_ ) | Titanic - Machine Learning from Disaster |
467,142 | def fit_predict(estimator, x_train, y_train, x_test):
oof = np.zeros(x_train.shape[0])
y = np.zeros(x_test.shape[0])
kf = KFold(n_splits=5, shuffle=True, random_state=42)
for train_idx, valid_idx in kf.split(x_train):
y_part = fit_predict_step(estimator, x_train, y_train, train_idx, valid_idx, x_test, oof)
y += y_part / kf.n_splits
print('Final MAE:', mean_absolute_error(y_train, oof))
return oof, y<train_model> | dectree_grid = grid.best_estimator_
dectree_grid.score(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | def fit_step(estimator, x_train, y_train, train_idx, valid_idx, oof):
x_train_train = x_train[train_idx]
y_train_train = y_train[train_idx]
x_train_valid = x_train[valid_idx]
y_train_valid = y_train[valid_idx]
estimator.fit(x_train_train, y_train_train, x_train_valid, y_train_valid)
oof_part = estimator.predict(x_train_valid)
mae = mean_absolute_error(y_train_valid, oof_part)
print('MAE:', mae)
oof[valid_idx] = oof_part
return estimator, mae<train_model> | n_estimators = [140,145,150,155,160];
max_depth = range(1,10);
criterions = ['gini', 'entropy'];
cv = StratifiedShuffleSplit(n_splits=10, test_size=.30, random_state=15)
parameters = {'n_estimators':n_estimators,
'max_depth':max_depth,
'criterion': criterions
}
grid = GridSearchCV(estimator=RandomForestClassifier(max_features='auto'),
param_grid=parameters,
cv=cv,
n_jobs = -1)
grid.fit(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | def fit(estimator, x_train, y_train):
oof = np.zeros(x_train.shape[0])
kf = KFold(n_splits=5, shuffle=True, random_state=42)
trained_estimators = []
for train_idx, valid_idx in kf.split(x_train):
e, mae = fit_step(estimator, x_train, y_train, train_idx, valid_idx, oof)
trained_estimators.append(deepcopy(e))
print('Final MAE:', mean_absolute_error(y_train, oof))
return oof, trained_estimators<predict_on_test> | print(grid.best_score_)
print(grid.best_params_)
print(grid.best_estimator_ ) | Titanic - Machine Learning from Disaster |
467,142 | def predict(trained_estimators, x_test):
y = np.zeros(x_test.shape[0])
for estimator in trained_estimators:
y_part = estimator.predict(x_test)
y += y_part / len(trained_estimators)
return y<train_model> | rf_grid = grid.best_estimator_
rf_grid.score(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | def pipeline_fit(estimator, df_train, scaler=None):
add_new_features_4(df_train)
x_train, y_train = feature_engineering(df_train, is_train=True)
x_train = reduce_mem_usage(x_train)
gc.collect()
if not(scaler is None):
scaler.fit(x_train)
scaled_x_train = scaler.transform(x_train)
else:
scaled_x_train = x_train.values
oof, trained_estimators = fit(estimator, scaled_x_train, y_train.values)
del x_train
del scaled_x_train
del y_train
gc.collect()
return oof, trained_estimators<find_best_model_class> | print(classification_report(y_test, y_pred, labels=rf_grid.classes_)) | Titanic - Machine Learning from Disaster |
467,142 | def pipeline_predict(trained_estimators, df_test, scaler=None):
add_new_features_4(df_test)
x_test, _ = feature_engineering(df_test, is_train=False)
x_test = reduce_mem_usage(x_test)
gc.collect()
if not(scaler is None):
scaled_x_test = scaler.transform(x_test)
else:
scaled_x_test = x_test.values
y = predict(trained_estimators, scaled_x_test)
del x_test
del scaled_x_test
gc.collect()
return y<load_from_csv> | n_estimators = [10,30,50,70,80,150,160, 170,175,180,185];
cv = StratifiedShuffleSplit(n_splits=10, test_size=.30, random_state=15)
parameters = {'n_estimators':n_estimators,
}
grid = GridSearchCV(BaggingClassifier(base_estimator= None,
bootstrap_features=False),
param_grid=parameters,
cv=cv,
n_jobs = -1)
grid.fit(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | df_train = pd.read_csv('../input/train_V2.csv', index_col='Id')
df_train.shape<feature_engineering> | print(grid.best_score_)
print(grid.best_params_)
print(grid.best_estimator_ ) | Titanic - Machine Learning from Disaster |
467,142 | df_train = reduce_mem_usage(df_train )<set_options> | bagging_grid = grid.best_estimator_
bagging_grid.score(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | gc.collect()<drop_column> | n_estimators = [100,140,145,150,160, 170,175,180,185];
cv = StratifiedShuffleSplit(n_splits=10, test_size=.30, random_state=15)
learning_r = [0.1,1,0.01,0.5]
parameters = {'n_estimators':n_estimators,
'learning_rate':learning_r
}
grid = GridSearchCV(AdaBoostClassifier(base_estimator= None,
),
param_grid=parameters,
cv=cv,
n_jobs = -1)
grid.fit(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | df_train.drop(df_train[df_train['winPlacePerc'].isnull() ].index, inplace=True )<import_modules> | print(grid.best_score_)
print(grid.best_params_)
print(grid.best_estimator_ ) | Titanic - Machine Learning from Disaster |
467,142 | import torch
from torch.utils.data import DataLoader, TensorDataset
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm<set_options> | adaBoost_grid = grid.best_estimator_
adaBoost_grid.score(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device<train_model> | gradient_boost = GradientBoostingClassifier()
gradient_boost.fit(X, y)
y_pred = gradient_boost.predict(X_test)
gradient_accy = round(accuracy_score(y_pred, y_test), 3)
print(gradient_accy ) | Titanic - Machine Learning from Disaster |
467,142 | class PyTorch(Estimator):
def fit(self, x_train, y_train, x_valid, y_valid):
train_tensor = TensorDataset(
torch.from_numpy(x_train.astype('float32')) ,
torch.from_numpy(y_train.astype('float32')))
train_loader = DataLoader(train_tensor, batch_size=256, shuffle=True)
self.model = nn.Sequential(
weight_norm(nn.Linear(x_train.shape[1], 128)) ,
nn.ReLU() ,
weight_norm(nn.Linear(128, 128)) ,
nn.ReLU() ,
weight_norm(nn.Linear(128, 128)) ,
nn.ReLU() ,
weight_norm(nn.Linear(128, 128)) ,
nn.ReLU() ,
weight_norm(nn.Linear(128, 1)) ).to(device)
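# Kaiming-initialize the weight-norm direction (weight_v) and magnitude (weight_g) parameters of each linear layer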
for m in self.model:
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight_v)
nn.init.kaiming_normal_(m.weight_g)
nn.init.constant_(m.bias, 0)
criterion = nn.L1Loss()
optimizer = torch.optim.Adam(self.model.parameters() , betas=(0.9, 0.999), lr=1e-3)
self.model.train()
n_epochs = 50
for epoch in range(n_epochs):
epoch_loss = 0.0
for train_part, y_part in train_loader:
optimizer.zero_grad()
y_pred_part = self.model(train_part.to(device))
loss = criterion(y_pred_part.reshape(-1), y_part.to(device))
loss.backward()
optimizer.step()
epoch_loss += y_pred_part.shape[0] * loss.item()
print('Epoch %3d / %3d. Loss = %.5f' %(epoch + 1, n_epochs, epoch_loss / x_train.shape[0]))
def predict(self, x):
self.model.eval()
x_tensor = torch.from_numpy(x.astype('float32'))
x_loader = DataLoader(x_tensor, batch_size=256, shuffle=False)
y_pred = np.empty(0)
with torch.no_grad() :
for x_part in x_loader:
y_pred_part = self.model(x_part.to(device)).data.cpu().numpy().reshape(-1)
y_pred = np.append(y_pred, y_pred_part)
return y_pred<train_model> | from xgboost import XGBClassifier | Titanic - Machine Learning from Disaster |
467,142 | %%time
scaler = StandardScaler()
oof, trained_estimators = pipeline_fit(PyTorch() , df_train, scaler )<set_options> | XGBClassifier = XGBClassifier()
XGBClassifier.fit(X, y)
y_pred = XGBClassifier.predict(X_test)
XGBClassifier_accy = round(accuracy_score(y_pred, y_test), 3)
print(XGBClassifier_accy ) | Titanic - Machine Learning from Disaster |
467,142 | del df_train
gc.collect()<load_from_csv> | ExtraTreesClassifier = ExtraTreesClassifier()
ExtraTreesClassifier.fit(X, y)
y_pred = ExtraTreesClassifier.predict(X_test)
extraTree_accy = round(accuracy_score(y_pred, y_test), 3)
print(extraTree_accy)
| Titanic - Machine Learning from Disaster |
467,142 | df_test = pd.read_csv('../input/test_V2.csv', index_col = 'Id')
df_test.shape<feature_engineering> | GaussianProcessClassifier = GaussianProcessClassifier()
GaussianProcessClassifier.fit(X, y)
y_pred = GaussianProcessClassifier.predict(X_test)
gau_pro_accy = round(accuracy_score(y_pred, y_test), 3)
print(gau_pro_accy ) | Titanic - Machine Learning from Disaster |
467,142 | df_test = reduce_mem_usage(df_test )<create_dataframe> | voting_classifier = VotingClassifier(estimators=[
('lr_grid', logreg_grid),
('svc', svm_grid),
('random_forest', rf_grid),
('gradient_boosting', gradient_boost),
('decision_tree_grid',dectree_grid),
('knn_classifier', knn_grid),
('XGB_Classifier', XGBClassifier),
('bagging_classifier', bagging_grid),
('adaBoost_classifier',adaBoost_grid),
('ExtraTrees_Classifier', ExtraTreesClassifier),
('gaussian_classifier',gaussian),
('gaussian_process_classifier', GaussianProcessClassifier)
],voting='hard')
voting_classifier = voting_classifier.fit(X,y ) | Titanic - Machine Learning from Disaster |
467,142 | df_test_id = pd.DataFrame(index=df_test.index )<set_options> | y_pred = voting_classifier.predict(X_test)
voting_accy = round(accuracy_score(y_pred, y_test), 3)
print(voting_accy ) | Titanic - Machine Learning from Disaster |
467,142 | gc.collect()<predict_on_test> | all_models = [logreg_grid,
knn_grid,
knn_ran_grid,
svm_grid,
dectree_grid,
rf_grid,
bagging_grid,
adaBoost_grid,
voting_classifier]
c = {}
for i in all_models:
a = i.predict(X_test)
b = accuracy_score(a, y_test)
c[i] = b
| Titanic - Machine Learning from Disaster |
467,142 | <set_options><EOS> | test_prediction =(max(c, key=c.get)).predict(test)
submission = pd.DataFrame({
"PassengerId": passengerid,
"Survived": test_prediction
})
submission.PassengerId = submission.PassengerId.astype(int)
submission.Survived = submission.Survived.astype(int)
submission.to_csv("titanic1_submission.csv", index=False ) | Titanic - Machine Learning from Disaster |
556,065 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<save_to_csv> | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Perceptron
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_validate
from sklearn.model_selection import GridSearchCV
from sklearn import svm, tree, linear_model, neighbors, naive_bayes, ensemble, discriminant_analysis, gaussian_process
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn import feature_selection
from sklearn import model_selection
from sklearn import metrics
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import seaborn as sns
from pandas.plotting import scatter_matrix | Titanic - Machine Learning from Disaster |
556,065 | df_oof = pd.DataFrame()
df_oof['pytorch_oof'] = oof
df_oof.to_csv('pytorch_oof.csv', index_label='id' )<save_to_csv> | train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
data_df = train_df.append(test_df ) | Titanic - Machine Learning from Disaster |
556,065 | df_submission = pd.DataFrame(index=df_test_id.index)
df_submission['winPlacePerc'] = y
df_submission.to_csv('pytorch_raw.csv', index_label='Id' )<load_from_csv> | data_df['Title'] = data_df['Name']
for name_string in data_df['Name']:
data_df['Title'] = data_df['Name'].str.extract('([A-Za-z]+)\.', expand=True)
mapping = {'Mlle': 'Miss', 'Major': 'Mr', 'Col': 'Mr', 'Sir': 'Mr', 'Don': 'Mr', 'Mme': 'Miss',
'Jonkheer': 'Mr', 'Lady': 'Mrs', 'Capt': 'Mr', 'Countess': 'Mrs', 'Ms': 'Miss', 'Dona': 'Mrs'}
data_df.replace({'Title': mapping}, inplace=True)
titles = ['Dr', 'Master', 'Miss', 'Mr', 'Mrs', 'Rev']
for title in titles:
age_to_impute = data_df.groupby('Title')['Age'].median() [titles.index(title)]
data_df.loc[(data_df['Age'].isnull())&(data_df['Title'] == title), 'Age'] = age_to_impute
train_df['Age'] = data_df['Age'][:891]
test_df['Age'] = data_df['Age'][891:]
data_df.drop('Title', axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
556,065 | df_test = pd.read_csv('../input/test_V2.csv')
df_test.shape<merge> | data_df['Family_Size'] = data_df['Parch'] + data_df['SibSp']
train_df['Family_Size'] = data_df['Family_Size'][:891]
test_df['Family_Size'] = data_df['Family_Size'][891:] | Titanic - Machine Learning from Disaster |
556,065 | df_submission = df_submission.merge(df_test[['Id', 'matchId', 'groupId', 'maxPlace', 'numGroups']], on='Id', how='left')
df_submission.head()<merge> | data_df['Last_Name'] = data_df['Name'].apply(lambda x: str.split(x, ",")[0])
data_df['Fare'].fillna(data_df['Fare'].mean() , inplace=True)
DEFAULT_SURVIVAL_VALUE = 0.5
data_df['Family_Survival'] = DEFAULT_SURVIVAL_VALUE
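# propagate survival within families grouped by (Last_Name, Fare): any known survivor -> 1, otherwise all known non-survivors -> 0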
for grp, grp_df in data_df[['Survived','Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId',
'SibSp', 'Parch', 'Age', 'Cabin']].groupby(['Last_Name', 'Fare']):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
data_df.loc[data_df['PassengerId'] == passID, 'Family_Survival'] = 1
elif(smin==0.0):
data_df.loc[data_df['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passengers with family survival information:",
data_df.loc[data_df['Family_Survival']!=0.5].shape[0] ) | Titanic - Machine Learning from Disaster |
556,065 | df_submission_group = df_submission.groupby(['matchId', 'groupId'] ).first().reset_index()
df_submission_group['rank'] = df_submission_group.groupby(['matchId'])['winPlacePerc'].rank()
df_submission_group = df_submission_group.merge(df_submission_group.groupby('matchId')['rank'].max().to_frame('max_rank' ).reset_index() , on='matchId', how='left')
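# map the within-match rank onto [0, 1]: lowest-ranked group -> 0, top group -> 1, matching winPlacePerc's scale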
df_submission_group['adjusted_perc'] =(df_submission_group['rank'] - 1)/(df_submission_group['numGroups'] - 1)
df_submission = df_submission.merge(df_submission_group[['adjusted_perc', 'matchId', 'groupId']], on=['matchId', 'groupId'], how='left')
df_submission['winPlacePerc'] = df_submission['adjusted_perc']
df_submission.head()<feature_engineering> | for _, grp_df in data_df.groupby('Ticket'):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
if(row['Family_Survival'] == 0)|(row['Family_Survival']== 0.5):
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
data_df.loc[data_df['PassengerId'] == passID, 'Family_Survival'] = 1
elif(smin==0.0):
data_df.loc[data_df['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passenger with family/group survival information: "
+str(data_df[data_df['Family_Survival']!=0.5].shape[0]))
train_df['Family_Survival'] = data_df['Family_Survival'][:891]
test_df['Family_Survival'] = data_df['Family_Survival'][891:] | Titanic - Machine Learning from Disaster |
556,065 | df_submission.loc[df_submission.maxPlace == 0, 'winPlacePerc'] = 0
df_submission.loc[df_submission.maxPlace == 1, 'winPlacePerc'] = 1<feature_engineering> | data_df['Fare'].fillna(data_df['Fare'].median() , inplace = True)
data_df['FareBin'] = pd.qcut(data_df['Fare'], 5)
label = LabelEncoder()
data_df['FareBin_Code'] = label.fit_transform(data_df['FareBin'])
train_df['FareBin_Code'] = data_df['FareBin_Code'][:891]
test_df['FareBin_Code'] = data_df['FareBin_Code'][891:]
train_df.drop(['Fare'], 1, inplace=True)
test_df.drop(['Fare'], 1, inplace=True ) | Titanic - Machine Learning from Disaster |
556,065 | t = df_submission.loc[df_submission.maxPlace > 1]
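# winPlacePerc only takes maxPlace discrete values spaced 1/(maxPlace-1) apart; snap predictions to that grid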
gap = 1.0 /(t.maxPlace.values - 1)
fixed_perc = np.around(t.winPlacePerc.values / gap)* gap
df_submission.loc[df_submission.maxPlace > 1, 'winPlacePerc'] = fixed_perc<feature_engineering> | data_df['AgeBin'] = pd.qcut(data_df['Age'], 4)
label = LabelEncoder()
data_df['AgeBin_Code'] = label.fit_transform(data_df['AgeBin'])
train_df['AgeBin_Code'] = data_df['AgeBin_Code'][:891]
test_df['AgeBin_Code'] = data_df['AgeBin_Code'][891:]
train_df.drop(['Age'], 1, inplace=True)
test_df.drop(['Age'], 1, inplace=True ) | Titanic - Machine Learning from Disaster |
556,065 | df_submission.loc[(df_submission.maxPlace > 1)&(df_submission.numGroups == 1), 'winPlacePerc'] = 0
assert df_submission['winPlacePerc'].isnull().sum() == 0<save_to_csv> | train_df['Sex'].replace(['male','female'],[0,1],inplace=True)
test_df['Sex'].replace(['male','female'],[0,1],inplace=True)
train_df.drop(['Name', 'PassengerId', 'SibSp', 'Parch', 'Ticket', 'Cabin',
'Embarked'], axis = 1, inplace = True)
test_df.drop(['Name','PassengerId', 'SibSp', 'Parch', 'Ticket', 'Cabin',
'Embarked'], axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
556,065 | df_submission[['Id', 'winPlacePerc']].to_csv('pytorch_adjusted.csv', index=False )<load_from_csv> | X = train_df.drop('Survived', 1)
y = train_df['Survived']
X_test = test_df.copy() | Titanic - Machine Learning from Disaster |
556,065 | df_sub1 = pd.read_csv("../input/pytorch-baseline-model/submission_raw.csv")
df_sub2 = pd.read_csv("../input/lightgbm-baseline/submission_adjusted.csv")
df_sub3 = pd.read_csv("../input/mlp-and-fe/submission.csv")
df_test = pd.read_csv("../input/pubg-finish-placement-prediction/test_V2.csv" )<merge> | std_scaler = StandardScaler()
X = std_scaler.fit_transform(X)
X_test = std_scaler.transform(X_test ) | Titanic - Machine Learning from Disaster |
556,065 | df_sub1 = df_sub1.merge(df_test[["Id", "matchId", "groupId", "maxPlace", "numGroups"]], on="Id",how="left" )<merge> | n_neighbors = [6,7,8,9,10,11,12,14,16,18,20,22]
algorithm = ['auto']
weights = ['uniform', 'distance']
leaf_size = list(range(1,50,5))
hyperparams = {'algorithm': algorithm, 'weights': weights, 'leaf_size': leaf_size,
'n_neighbors': n_neighbors}
gd=GridSearchCV(estimator = KNeighborsClassifier() , param_grid = hyperparams, verbose=True,
cv=10, scoring = "roc_auc")
gd.fit(X, y)
print(gd.best_score_)
print(gd.best_estimator_ ) | Titanic - Machine Learning from Disaster |
556,065 | df_sub1_group = df_sub1.groupby(["matchId", "groupId"] ).first().reset_index()
df_sub1_group["rank"] = df_sub1_group.groupby(["matchId"])["winPlacePerc"].rank()
df_sub1_group = df_sub1_group.merge(
df_sub1_group.groupby("matchId")["rank"].max().to_frame("max_rank" ).reset_index() ,
on="matchId", how="left")
df_sub1_group["adjusted_perc"] =(df_sub1_group["rank"] - 1)/(df_sub1_group["numGroups"] - 1)
df_sub1 = df_sub1.merge(df_sub1_group[["adjusted_perc", "matchId", "groupId"]], on=["matchId", "groupId"], how="left")
df_sub1["winPlacePerc"] = df_sub1["adjusted_perc"]<feature_engineering> | gd.best_estimator_.fit(X, y)
y_pred = gd.best_estimator_.predict(X_test ) | Titanic - Machine Learning from Disaster |
556,065 | df_sub1.loc[df_sub1.maxPlace == 0, "winPlacePerc"] = 0
df_sub1.loc[df_sub1.maxPlace == 1, "winPlacePerc"] = 1<feature_engineering> | knn = KNeighborsClassifier(algorithm='auto', leaf_size=26, metric='minkowski',
metric_params=None, n_jobs=1, n_neighbors=6, p=2,
weights='uniform')
knn.fit(X, y)
y_pred = knn.predict(X_test ) | Titanic - Machine Learning from Disaster |
556,065 | <feature_engineering><EOS> | temp = pd.DataFrame(pd.read_csv("../input/test.csv")['PassengerId'])
temp['Survived'] = y_pred
temp.to_csv("../working/submission.csv", index = False ) | Titanic - Machine Learning from Disaster |
191,323 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<feature_engineering> | %matplotlib inline
py.init_notebook_mode(connected=True)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
PassengerId = test['PassengerId']
train.head(3 ) | Titanic - Machine Learning from Disaster |
191,323 | df_sub1["winPlacePerc"] =(df_sub1["winPlacePerc"] + df_sub2["winPlacePerc"] + df_sub3["winPlacePerc"])/ 3
df_sub1 = df_sub1[["Id", "winPlacePerc"]]<load_from_csv> | original_train = train.copy()
full_data = [train, test]
train['Has_Cabin'] = train["Cabin"].apply(lambda x: 0 if type(x)== float else 1)
test['Has_Cabin'] = test["Cabin"].apply(lambda x: 0 if type(x)== float else 1)
for dataset in full_data:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
for dataset in full_data:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
for dataset in full_data:
dataset['Embarked'] = dataset['Embarked'].fillna('S')
for dataset in full_data:
dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median())
for dataset in full_data:
age_avg = dataset['Age'].mean()
age_std = dataset['Age'].std()
age_null_count = dataset['Age'].isnull().sum()
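# draw random integer ages from [mean - std, mean + std] for the missing entries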
age_null_random_list = np.random.randint(age_avg - age_std, age_avg + age_std, size=age_null_count)
dataset.loc[np.isnan(dataset['Age']), 'Age'] = age_null_random_list
dataset['Age'] = dataset['Age'].astype(int)
def get_title(name):
title_search = re.search('([A-Za-z]+)\.', name)
if title_search:
return title_search.group(1)
return ""
for dataset in full_data:
dataset['Title'] = dataset['Name'].apply(get_title)
for dataset in full_data:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
for dataset in full_data:
dataset['Sex'] = dataset['Sex'].map({'female': 0, 'male': 1} ).astype(int)
title_mapping = {"Mr": 1, "Master": 2, "Mrs": 3, "Miss": 4, "Rare": 5}
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int)
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91)&(dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454)&(dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16)&(dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32)&(dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48)&(dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age'] = 4 ; | Titanic - Machine Learning from Disaster |
191,323 | df_test = pd.read_csv("../input/pubg-finish-placement-prediction/test_V2.csv" )<merge> | drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp']
train = train.drop(drop_elements, axis = 1)
test = test.drop(drop_elements, axis = 1 ) | Titanic - Machine Learning from Disaster |
191,323 | df_sub1 = df_sub1.merge(df_test[["Id", "matchId", "groupId", "maxPlace", "numGroups"]], on="Id",how="left" )<merge> | train[['Title', 'Survived']].groupby(['Title'], as_index=False ).agg(['mean', 'count', 'sum'])
| Titanic - Machine Learning from Disaster |
191,323 | df_sub1_group = df_sub1.groupby(["matchId", "groupId"] ).first().reset_index()
df_sub1_group["rank"] = df_sub1_group.groupby(["matchId"])["winPlacePerc"].rank()
df_sub1_group = df_sub1_group.merge(
df_sub1_group.groupby("matchId")["rank"].max().to_frame("max_rank" ).reset_index() ,
on="matchId", how="left")
df_sub1_group["adjusted_perc"] =(df_sub1_group["rank"] - 1)/(df_sub1_group["numGroups"] - 1)
df_sub1 = df_sub1.merge(df_sub1_group[["adjusted_perc", "matchId", "groupId"]], on=["matchId", "groupId"], how="left")
df_sub1["winPlacePerc"] = df_sub1["adjusted_perc"]<feature_engineering> | train[['Sex', 'Survived']].groupby(['Sex'], as_index=False ).agg(['mean', 'count', 'sum'])
| Titanic - Machine Learning from Disaster |
191,323 | df_sub1.loc[df_sub1.maxPlace == 0, "winPlacePerc"] = 0
df_sub1.loc[df_sub1.maxPlace == 1, "winPlacePerc"] = 1<feature_engineering> | title_and_sex = original_train.copy() [['Name', 'Sex']]
title_and_sex['Title'] = title_and_sex['Name'].apply(get_title)
title_and_sex['Sex'] = title_and_sex['Sex'].map({'female': 0, 'male': 1} ).astype(int)
title_and_sex[['Title', 'Sex']].groupby(['Title'], as_index=False ).agg(['mean', 'count', 'sum'])
| Titanic - Machine Learning from Disaster |
191,323 | subset = df_sub1.loc[df_sub1.maxPlace > 1]
gap = 1.0 /(subset.maxPlace.values - 1)
new_perc = np.around(subset.winPlacePerc.values / gap)* gap
df_sub1.loc[df_sub1.maxPlace > 1, "winPlacePerc"] = new_perc<feature_engineering> | def get_gini_impurity(survived_count, total_count):
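# Gini impurity of a binary node: chance of mislabelling a random observation, equal to 2 * p * (1 - p)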
survival_prob = survived_count/total_count
not_survival_prob =(1 - survival_prob)
random_observation_survived_prob = survival_prob
random_observation_not_survived_prob =(1 - random_observation_survived_prob)
mislabelling_survided_prob = not_survival_prob * random_observation_survived_prob
mislabelling_not_survided_prob = survival_prob * random_observation_not_survived_prob
gini_impurity = mislabelling_survided_prob + mislabelling_not_survided_prob
return gini_impurity | Titanic - Machine Learning from Disaster |
191,323 | df_sub1.loc[(df_sub1.maxPlace > 1)&(df_sub1.numGroups == 1), "winPlacePerc"] = 0
assert df_sub1["winPlacePerc"].isnull().sum() == 0<feature_engineering> | gini_impurity_starting_node = get_gini_impurity(342, 891)
gini_impurity_starting_node | Titanic - Machine Learning from Disaster |
191,323 | df_sub1["winPlacePerc"] = df_sub1["winPlacePerc"]<save_to_csv> | gini_impurity_men = get_gini_impurity(109, 577)
gini_impurity_men | Titanic - Machine Learning from Disaster |
191,323 | df_sub1[["Id", "winPlacePerc"]].to_csv("submission.csv", index=False )<load_from_csv> | gini_impurity_women = get_gini_impurity(233, 314)
gini_impurity_women | Titanic - Machine Learning from Disaster |
191,323 | print('train')
train_df = import_data('../input/train.csv')
print('test')
test_df = import_data('../input/test.csv' )<groupby> | men_weight = 577/891
women_weight = 314/891
weighted_gini_impurity_sex_split =(gini_impurity_men * men_weight)+(gini_impurity_women * women_weight)
sex_gini_decrease = weighted_gini_impurity_sex_split - gini_impurity_starting_node
sex_gini_decrease | Titanic - Machine Learning from Disaster |
191,323 | def featureEngineering(df):
df_size = df.groupby(['matchId','groupId'] ).size().reset_index(name='group_size')
df_mean = df.groupby(['matchId','groupId'] ).mean().reset_index()
df_max = df.groupby(['matchId','groupId'] ).max().reset_index()
df_min = df.groupby(['matchId','groupId'] ).min().reset_index()
df_match_mean = df.groupby(['matchId'] ).mean().reset_index()
df_train_max_PG = df.groupby(['matchId','groupId'])['kills'].count().reset_index().groupby('matchId')['kills'].max().reset_index()
df_train_max_PG.columns = ['matchId','max_players_in_group']
df = pd.merge(df, df_mean, suffixes=["", "_mean"], how='left', on=['matchId', 'groupId'])
df = pd.merge(df, df_max, suffixes=["", "_max"], how='left', on=['matchId', 'groupId'])
df = pd.merge(df, df_min, suffixes=["", "_min"], how='left', on=['matchId', 'groupId'])
df = pd.merge(df, df_match_mean, suffixes=["", "_match_mean"], how='left', on=['matchId'])
df = pd.merge(df, df_size, how='left', on=['matchId', 'groupId'])
df = pd.merge(df, df_train_max_PG, how='left', on=['matchId'])
return df
train_df = featureEngineering(train_df)
test_df = featureEngineering(test_df )<filter> | gini_impurity_title_1 = get_gini_impurity(81, 517)
gini_impurity_title_1 | Titanic - Machine Learning from Disaster |
191,323 | print('Old size: %d' % len(train_df))
train_df = train_df[(train_df.kills <= 35)&(train_df.assists <= 13)&(train_df.boosts <= 16)&(train_df.damageDealt <= 3500)
&(train_df.DBNOs <= 35)&(train_df.headshotKills <= 22)&(train_df.killStreaks <= 10)&(train_df.assists <= 13)
&(train_df.longestKill <= 1000)&(train_df.revives <= 20)&(train_df.rideDistance <= 28000)&(train_df.roadKills <= 15)
&(train_df.swimDistance <= 3500)&(train_df.teamKills <= 10)&(train_df.vehicleDestroys <= 3)&(train_df.walkDistance <= 13000)
&(train_df.weaponsAcquired <= 60)]
print('New size: %d' % len(train_df))
def headshot_percent(data):
data['headPerc']=data['headshotKills']/data['kills']
data['roadPerc']=data['roadKills']/data['kills']
data['totalDistance']=data['rideDistance']+data['swimDistance']+data['walkDistance']
data['rideDistancePerc']=data['rideDistance']/data['totalDistance']
data['swimDistancePerc']=data['swimDistance']/data['totalDistance']
data['walkDistancePerc']=data['walkDistance']/data['totalDistance']
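# 0/0 divisions above produce NaN (e.g. no kills or zero total distance); fillna below resets them to 0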
data.fillna(0,axis=1,inplace=True)
return data.head()
headshot_percent(train_df)
headshot_percent(test_df )<drop_column> | gini_impurity_title_others = get_gini_impurity(261, 374)
gini_impurity_title_others | Titanic - Machine Learning from Disaster |
191,323 | features_not2use = ['Id', 'groupId', 'matchId','winPlacePerc_mean','winPlacePerc_max','winPlacePerc_min','winPlacePerc_match_mean']
for df in [train_df]:
df.drop(features_not2use, axis=1, inplace=True)
features_not2use = ['Id', 'groupId', 'matchId']
for df in [test_df]:
df.drop(features_not2use, axis=1, inplace=True)
X_train = train_df.sample(frac=0.8)
X_val = train_df.loc[~train_df.index.isin(X_train.index)]
<prepare_x_and_y> | title_1_weight = 517/891
title_others_weight = 374/891
weighted_gini_impurity_title_split =(gini_impurity_title_1 * title_1_weight)+(gini_impurity_title_others * title_others_weight)
title_gini_decrease = weighted_gini_impurity_title_split - gini_impurity_starting_node
title_gini_decrease | Titanic - Machine Learning from Disaster |
191,323 | y_train = X_train['winPlacePerc']
X_train.drop('winPlacePerc', axis=1, inplace=True)
y_val = X_val['winPlacePerc']
X_val.drop('winPlacePerc', axis=1, inplace=True )<train_model> | cv = KFold(n_splits=10)
accuracies = list()
max_attributes = len(list(test))
depth_range = range(1, max_attributes + 1)
for depth in depth_range:
fold_accuracy = []
tree_model = tree.DecisionTreeClassifier(max_depth = depth)
for train_fold, valid_fold in cv.split(train):
f_train = train.loc[train_fold]
f_valid = train.loc[valid_fold]
model = tree_model.fit(X = f_train.drop(['Survived'], axis=1),
y = f_train["Survived"])
valid_acc = model.score(X = f_valid.drop(['Survived'], axis=1),
y = f_valid["Survived"])
fold_accuracy.append(valid_acc)
avg = sum(fold_accuracy)/len(fold_accuracy)
accuracies.append(avg)
df = pd.DataFrame({"Max Depth": depth_range, "Average Accuracy": accuracies})
df = df[["Max Depth", "Average Accuracy"]]
print(df.to_string(index=False)) | Titanic - Machine Learning from Disaster |
191,323 | start_time = time.time()
model = LGBMRegressor(iterations=250, learning_rate=0.05, loss_function='MAE',eval_metric='MAE', depth = -1,
use_best_model=True, od_type="Iter", od_wait=10, thread_count=128, random_seed = 123, num_leaves= 144,n_estimators= 800,
bagging_fraction= 0.8, bagging_freq= 5, feature_fraction= 0.9, objective= 'regression_l2')
model.fit(X_train, y_train, eval_set=(X_val, y_val))
end_time = time.time()
print('The training time = {}'.format(end_time - start_time))<predict_on_test> | y_train = train['Survived']
x_train = train.drop(['Survived'], axis=1 ).values
x_test = test.values
decision_tree = tree.DecisionTreeClassifier(max_depth = 3)
decision_tree.fit(x_train, y_train)
y_pred = decision_tree.predict(x_test)
submission = pd.DataFrame({
"PassengerId": PassengerId,
"Survived": y_pred
})
submission.to_csv('submission.csv', index=False)
with open("tree1.dot", 'w')as f:
f = tree.export_graphviz(decision_tree,
out_file=f,
max_depth = 3,
impurity = True,
feature_names = list(train.drop(['Survived'], axis=1)) ,
class_names = ['Died', 'Survived'],
rounded = True,
filled= True)
check_call(['dot','-Tpng','tree1.dot','-o','tree1.png'])
img = Image.open("tree1.png")
draw = ImageDraw.Draw(img)
font = ImageFont.truetype('/usr/share/fonts/truetype/liberation/LiberationSerif-Bold.ttf', 26)
draw.text(( 10, 0),
'"Title <= 1.5" corresponds to "Mr." title',
(0,0,255),
font=font)
img.save('sample-out.png')
PImage("sample-out.png")
| Titanic - Machine Learning from Disaster |
191,323 | pred = model.predict(test_df )<save_to_csv> | acc_decision_tree = round(decision_tree.score(x_train, y_train)* 100, 2)
acc_decision_tree | Titanic - Machine Learning from Disaster |
9,293,880 | test_new = import_data('../input/test.csv')
test_new['winPlacePercPred'] = pred
aux = test_new.groupby(['matchId','groupId'])['winPlacePercPred'].agg('mean' ).groupby('matchId' ).rank(pct=True ).reset_index()
aux.columns = ['matchId','groupId','winPlacePerc']
test_new = test_new.merge(aux, how='left', on=['matchId','groupId'])
submission = import_data('../input/sample_submission.csv')
submission = test_new[['Id','winPlacePerc']]
submission.to_csv('sample_submission.csv', index=False )<set_options> | warnings.filterwarnings('ignore')
| Titanic - Machine Learning from Disaster |
9,293,880 | warnings.filterwarnings("ignore")
gc.enable()
print(os.listdir(".. /input"))<load_from_csv> | %matplotlib inline
mpl.style.use('ggplot')
sns.set_style('white')
pylab.rcParams['figure.figsize'] = 12,8 | Titanic - Machine Learning from Disaster |
9,293,880 | def feature_engineering(is_train=True,debug=True):
if is_train:
print("processing train.csv")
if debug == True:
df = pd.read_csv('../input/train_V2.csv', nrows=10000)
else:
df = pd.read_csv('../input/train_V2.csv')
df = df[df['maxPlace'] > 1]
else:
print("processing test.csv")
df = pd.read_csv('../input/test_V2.csv')
print("remove some columns")
target = 'winPlacePerc'
print("Adding Features")
df['headshotrate'] = df['kills']/df['headshotKills']
df['killStreakrate'] = df['killStreaks']/df['kills']
df['healthitems'] = df['heals'] + df['boosts']
df['totalDistance'] = df['rideDistance'] + df["walkDistance"] + df["swimDistance"]
df['killPlace_over_maxPlace'] = df['killPlace'] / df['maxPlace']
df['headshotKills_over_kills'] = df['headshotKills'] / df['kills']
df['distance_over_weapons'] = df['totalDistance'] / df['weaponsAcquired']
df['walkDistance_over_heals'] = df['walkDistance'] / df['heals']
df['walkDistance_over_kills'] = df['walkDistance'] / df['kills']
df['killsPerWalkDistance'] = df['kills'] / df['walkDistance']
df["skill"] = df["headshotKills"] + df["roadKills"]
df[df == np.Inf] = np.NaN
df[df == np.NINF] = np.NaN
print("Removing Na's From DF")
df.fillna(0, inplace=True)
features = list(df.columns)
features.remove("Id")
features.remove("matchId")
features.remove("groupId")
features.remove("matchType")
y = None
if is_train:
print("get target")
y = df.groupby(['matchId','groupId'])[target].agg('mean')
features.remove(target)
print("get group mean feature")
agg = df.groupby(['matchId','groupId'])[features].agg('mean')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
if is_train: df_out = agg.reset_index() [['matchId','groupId']]
else: df_out = df[['matchId','groupId']]
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_mean", "_mean_rank"], how='left', on=['matchId', 'groupId'])
print("get group max feature")
agg = df.groupby(['matchId','groupId'])[features].agg('max')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_max", "_max_rank"], how='left', on=['matchId', 'groupId'])
print("get group min feature")
agg = df.groupby(['matchId','groupId'])[features].agg('min')
agg_rank = agg.groupby('matchId')[features].rank(pct=True ).reset_index()
df_out = df_out.merge(agg.reset_index() , suffixes=["", ""], how='left', on=['matchId', 'groupId'])
df_out = df_out.merge(agg_rank, suffixes=["_min", "_min_rank"], how='left', on=['matchId', 'groupId'])
print("get group size feature")
agg = df.groupby(['matchId','groupId'] ).size().reset_index(name='group_size')
df_out = df_out.merge(agg, how='left', on=['matchId', 'groupId'])
print("get match mean feature")
agg = df.groupby(['matchId'])[features].agg('mean' ).reset_index()
df_out = df_out.merge(agg, suffixes=["", "_match_mean"], how='left', on=['matchId'])
print("get match size feature")
agg = df.groupby(['matchId'] ).size().reset_index(name='match_size')
df_out = df_out.merge(agg, how='left', on=['matchId'])
df_out.drop(["matchId", "groupId"], axis=1, inplace=True)
X = df_out
del df, df_out, agg, agg_rank
gc.collect()
return X, y<prepare_x_and_y> | data_raw = pd.read_csv('../input/titanic/train.csv')
data_val = pd.read_csv('../input/titanic/test.csv')
data1 = data_raw.copy(deep = True)
data_cleaner = [data1, data_val]
print(data_raw.info())
data_raw.sample(10 ) | Titanic - Machine Learning from Disaster |
9,293,880 | x_train, y_train = feature_engineering(True,False)
x_test, _ = feature_engineering(False,True )<drop_column> | women = data_raw.loc[data_raw.Sex == 'female']["Survived"]
rate_women = sum(women)/len(women)
print("% of women who survived:", rate_women ) | Titanic - Machine Learning from Disaster |
9,293,880 | x_train = reduce_mem_usage(x_train)
x_test = reduce_mem_usage(x_test )<load_from_csv> | men = data_raw.loc[data_raw.Sex == 'male']["Survived"]
rate_men = sum(men)/len(men)
print("% of men who survived:", rate_men ) | Titanic - Machine Learning from Disaster |
9,293,880 | def post_rst(pred_test):
df_sub = pd.read_csv("../input/sample_submission_V2.csv")
df_test = pd.read_csv("../input/test_V2.csv")
df_sub['winPlacePerc'] = pred_test
df_sub = df_sub.merge(df_test[["Id", "matchId", "groupId", "maxPlace", "numGroups"]], on="Id", how="left")
df_sub_group = df_sub.groupby(["matchId", "groupId"] ).first().reset_index()
df_sub_group["rank"] = df_sub_group.groupby(["matchId"])["winPlacePerc"].rank()
df_sub_group = df_sub_group.merge(
df_sub_group.groupby("matchId")["rank"].max().to_frame("max_rank" ).reset_index() ,
on="matchId", how="left")
df_sub_group["adjusted_perc"] =(df_sub_group["rank"] - 1)/(df_sub_group["numGroups"] - 1)
df_sub = df_sub.merge(df_sub_group[["adjusted_perc", "matchId", "groupId"]], on=["matchId", "groupId"], how="left")
df_sub["winPlacePerc"] = df_sub["adjusted_perc"]
df_sub.loc[df_sub.maxPlace == 0, "winPlacePerc"] = 0
df_sub.loc[df_sub.maxPlace == 1, "winPlacePerc"] = 1
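# True placements are quantized: with maxPlace finishing positions, the only
# attainable values are multiples of 1 / (maxPlace - 1), so snapping each
# prediction to the nearest grid point tends to reduce MAE. Worked example:
# maxPlace = 4 -> gap = 1/3; a prediction of 0.55 snaps to 2/3 ~ 0.667.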
subset = df_sub.loc[df_sub.maxPlace > 1]
gap = 1.0 / (subset.maxPlace.values - 1)
new_perc = np.around(subset.winPlacePerc.values / gap) * gap
df_sub.loc[df_sub.maxPlace > 1, "winPlacePerc"] = new_perc
df_sub.loc[(df_sub.maxPlace > 1) & (df_sub.numGroups == 1), "winPlacePerc"] = 0
assert df_sub["winPlacePerc"].isnull().sum() == 0
df_sub[["Id", "winPlacePerc"]].to_csv("submission_adjusted.csv", index=False )<choose_model_class> | for dataset in data_cleaner:
dataset['Age'].fillna(dataset['Age'].median(), inplace = True)
dataset['Embarked'].fillna(dataset['Embarked'].mode()[0], inplace = True)
dataset['Fare'].fillna(dataset['Fare'].median(), inplace = True)
drop_column = ['PassengerId','Cabin', 'Ticket']
data1.drop(drop_column, axis=1, inplace = True)
print(data1.isnull().sum())
print("-"*10)
print(data_val.isnull().sum()) | Titanic - Machine Learning from Disaster |
9,293,880 | def nn_model(input_shape=x_train.shape[1], hidden_size=64):
model = Sequential()
model.add(Dense(hidden_size, input_dim=input_shape, kernel_initializer='normal'))
model.add(LeakyReLU(0.1))
model.add(Dense(hidden_size, kernel_initializer='normal'))
model.add(LeakyReLU(0.1))
model.add(Dense(hidden_size, kernel_initializer='normal'))
model.add(LeakyReLU(0.1))
model.add(Dense(hidden_size, kernel_initializer='normal'))
model.add(LeakyReLU(0.1))
model.add(Dense(1, kernel_initializer='normal'))
model.compile(loss='mean_absolute_error', optimizer='adam', metrics=['mae'])
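# A plain MLP regressor: four LeakyReLU hidden layers and a linear output,
# trained on mean absolute error, matching the MAE metric this competition
# scores on, so the loss optimises the leaderboard objective directly.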
return model<split> | for dataset in data_cleaner:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
dataset['IsAlone'] = 1
dataset.loc[dataset['FamilySize'] > 1, 'IsAlone'] = 0
dataset['Title'] = dataset['Name'].str.split(", ", expand=True)[1].str.split(".", expand=True)[0]
dataset['FareBin'] = pd.qcut(dataset['Fare'], 4)
dataset['AgeBin'] = pd.cut(dataset['Age'].astype(int), 5)
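# pd.qcut gives four equal-frequency (quartile) Fare bins, while pd.cut gives
# five equal-width Age intervals -- a sensible pairing, since Fare is heavily
# right-skewed and Age is roughly bell-shaped.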
stat_min = 10
title_names = (data1['Title'].value_counts() < stat_min)
data1['Title'] = data1['Title'].apply(lambda x: 'Misc' if title_names.loc[x] else x)
print(data1['Title'].value_counts())
print("-"*10)
data1.info()
data_val.info()
data1.sample(10) | Titanic - Machine Learning from Disaster |
9,293,880 | def train_val_split(x_train, y_train):
train_index = int(x_train.shape[0] * 0.8)
dev_X = x_train[:train_index]
val_X = x_train[train_index:]
dev_y = y_train[:train_index]
val_y = y_train[train_index:]
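# Note: this is a positional 80/20 split (first 80% of rows train), not a
# shuffled one -- only safe if the rows are not ordered in a way that
# correlates with the target.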
del x_train, y_train
gc.collect()
return dev_X, val_X, dev_y, val_y<split> | label = LabelEncoder()
for dataset in data_cleaner:
dataset['Sex_Code'] = label.fit_transform(dataset['Sex'])
dataset['Embarked_Code'] = label.fit_transform(dataset['Embarked'])
dataset['Title_Code'] = label.fit_transform(dataset['Title'])
dataset['AgeBin_Code'] = label.fit_transform(dataset['AgeBin'])
dataset['FareBin_Code'] = label.fit_transform(dataset['FareBin'])
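# Caveat: fit_transform is called separately on train and test, so the same
# category can receive different integer codes if the two sets contain
# different category values; fitting one encoder per column on the combined
# data would guarantee consistent codes.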
Target = ['Survived']
data1_x = ['Sex','Pclass', 'Embarked', 'Title','SibSp', 'Parch', 'Age', 'Fare', 'FamilySize', 'IsAlone']
data1_x_calc = ['Sex_Code','Pclass', 'Embarked_Code', 'Title_Code','SibSp', 'Parch', 'Age', 'Fare']
data1_xy = Target + data1_x
print('Original X Y: ', data1_xy, '\n')
data1_x_bin = ['Sex_Code','Pclass', 'Embarked_Code', 'Title_Code', 'FamilySize', 'AgeBin_Code', 'FareBin_Code']
data1_xy_bin = Target + data1_x_bin
print('Bin X Y: ', data1_xy_bin, '\n')
data1_dummy = pd.get_dummies(data1[data1_x])
data1_x_dummy = data1_dummy.columns.tolist()
data1_xy_dummy = Target + data1_x_dummy
print('Dummy X Y: ', data1_xy_dummy, '\n')
data1_dummy.head() | Titanic - Machine Learning from Disaster |
9,293,880 | dev_X, val_X, dev_y, val_y = train_val_split(x_train, y_train)
def run_lgb(train_X, train_y, val_X, val_y, x_test):
params = {"objective" : "regression", "metric" : "mae", 'n_estimators':5000, 'early_stopping_rounds':200,
"num_leaves" : 150, "learning_rate" : 0.05, "bagging_fraction" : 0.5,
"bagging_seed" : 0, "num_threads" : 4,"colsample_bytree" : 0.5
}
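# bagging_fraction / colsample_bytree subsample half the rows / columns per
# iteration as regularisation (note: LightGBM applies row bagging only when
# bagging_freq > 0, which does not appear to be set here), and training stops
# once validation MAE fails to improve for 200 rounds.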
lgtrain = lgb.Dataset(train_X, label=train_y)
lgval = lgb.Dataset(val_X, label=val_y)
model = lgb.train(params, lgtrain, valid_sets=[lgtrain, lgval], early_stopping_rounds=200, verbose_eval=1000)
pred_test_y = model.predict(x_test, num_iteration=model.best_iteration)
return pred_test_y
pred_test_lgb = run_lgb(dev_X, dev_y, val_X, val_y, x_test)
pred_test_lgb = pred_test_lgb.reshape(-1,1)
del dev_X, val_X, dev_y, val_y
gc.collect()<train_model> | train1_x, test1_x, train1_y, test1_y = model_selection.train_test_split(data1[data1_x_calc], data1[Target], random_state = 0)
train1_x_bin, test1_x_bin, train1_y_bin, test1_y_bin = model_selection.train_test_split(data1[data1_x_bin], data1[Target], random_state = 0)
train1_x_dummy, test1_x_dummy, train1_y_dummy, test1_y_dummy = model_selection.train_test_split(data1_dummy[data1_x_dummy], data1[Target], random_state = 0)
print("Data1 Shape: {}".format(data1.shape))
print("Train1 Shape: {}".format(train1_x.shape))
print("Test1 Shape: {}".format(test1_x.shape))
train1_x_bin.head() | Titanic - Machine Learning from Disaster |
9,293,880 | np.random.seed(42)
std_scaler = StandardScaler().fit(x_train)
x_train = std_scaler.transform(x_train)
x_test = std_scaler.transform(x_test)
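# The scaler is fit on the training features only, then applied to both sets,
# so no test-set statistics leak into training -- the standard pattern.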
mlp_model = nn_model()
mlp_model.fit(x_train, y_train, batch_size=128, epochs=8, verbose=1, validation_split=0.1, shuffle=True)
pred_test_mlp = mlp_model.predict(x_test)
del mlp_model
gc.collect()<drop_column> | MLA = [
ensemble.AdaBoostClassifier(),
ensemble.BaggingClassifier(),
ensemble.ExtraTreesClassifier(),
ensemble.GradientBoostingClassifier(),
ensemble.RandomForestClassifier(),
gaussian_process.GaussianProcessClassifier(),
linear_model.LogisticRegressionCV(),
linear_model.PassiveAggressiveClassifier(),
linear_model.RidgeClassifierCV(),
linear_model.SGDClassifier(),
linear_model.Perceptron(),
naive_bayes.BernoulliNB(),
naive_bayes.GaussianNB(),
neighbors.KNeighborsClassifier(),
svm.SVC(probability=True),
svm.NuSVC(probability=True),
svm.LinearSVC(),
tree.DecisionTreeClassifier(),
tree.ExtraTreeClassifier(),
discriminant_analysis.LinearDiscriminantAnalysis(),
discriminant_analysis.QuadraticDiscriminantAnalysis(),
XGBClassifier()
]
cv_split = model_selection.ShuffleSplit(n_splits = 10, test_size = .3, train_size = .6, random_state = 0)
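# ShuffleSplit with train_size=.6 and test_size=.3 leaves 10% of the data out
# of every split -- a cheap extra guard against overfitting the
# cross-validation itself.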
MLA_columns = ['MLA Name', 'MLA Parameters','MLA Train Accuracy Mean', 'MLA Test Accuracy Mean', 'MLA Test Accuracy 3*STD' ,'MLA Time']
MLA_compare = pd.DataFrame(columns = MLA_columns)
MLA_predict = data1[Target].copy()
row_index = 0
for alg in MLA:
MLA_name = alg.__class__.__name__
MLA_compare.loc[row_index, 'MLA Name'] = MLA_name
MLA_compare.loc[row_index, 'MLA Parameters'] = str(alg.get_params())
cv_results = model_selection.cross_validate(alg, data1[data1_x_bin], data1[Target], cv = cv_split, return_train_score = True)
MLA_compare.loc[row_index, 'MLA Time'] = cv_results['fit_time'].mean()
MLA_compare.loc[row_index, 'MLA Train Accuracy Mean'] = cv_results['train_score'].mean()
MLA_compare.loc[row_index, 'MLA Test Accuracy Mean'] = cv_results['test_score'].mean()
MLA_compare.loc[row_index, 'MLA Test Accuracy 3*STD'] = cv_results['test_score'].std() * 3
alg.fit(data1[data1_x_bin], data1[Target])
MLA_predict[MLA_name] = alg.predict(data1[data1_x_bin])
row_index+=1
MLA_compare.sort_values(by = ['MLA Test Accuracy Mean'], ascending = False, inplace = True)
MLA_compare | Titanic - Machine Learning from Disaster |
9,293,880 | del x_train, y_train, x_test
gc.collect()<define_variables> | pivot_female = data1[data1.Sex=='female'].groupby(['Sex','Pclass', 'Embarked','FareBin'])['Survived'].mean()
print('Survival Decision Tree w/Female Node:\n', pivot_female)
pivot_male = data1[data1.Sex=='male'].groupby(['Sex','Title'])['Survived'].mean()
print('\nSurvival Decision Tree w/Male Node:\n', pivot_male) | Titanic - Machine Learning from Disaster |
9,293,880 | pred_test = (pred_test_mlp + pred_test_lgb) / 2.0<set_options> | def mytree(df):
Model = pd.DataFrame(data = {'Predict':[]})
male_title = ['Master']
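# A hand-rolled decision tree encoding the classic Titanic heuristics:
# default to "died"; women survive, except 3rd-class women who embarked at
# Southampton with Fare > 8; among men, only those titled "Master" survive.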
for index, row in df.iterrows():
Model.loc[index, 'Predict'] = 0
if (df.loc[index, 'Sex'] == 'female'):
Model.loc[index, 'Predict'] = 1
if ((df.loc[index, 'Sex'] == 'female') &
(df.loc[index, 'Pclass'] == 3) &
(df.loc[index, 'Embarked'] == 'S') &
(df.loc[index, 'Fare'] > 8)
):
Model.loc[index, 'Predict'] = 0
if ((df.loc[index, 'Sex'] == 'male') &
(df.loc[index, 'Title'] in male_title)
):
Model.loc[index, 'Predict'] = 1
return Model
Tree_Predict = mytree(data1)
print('Decision Tree Model Accuracy/Precision Score: {:.2f}%\n'.format(metrics.accuracy_score(data1['Survived'], Tree_Predict)*100))
print(metrics.classification_report(data1['Survived'], Tree_Predict)) | Titanic - Machine Learning from Disaster |