kernel_id (int64, 24.2k–23.3M) | prompt (string, length 8–1.85M) | completion (string, length 1–182k) | comp_name (string, length 5–57) |
|---|---|---|---|
2,403,150 | gc.collect()
batch_size = 500
n_epochs = 1000
max_epochs_without_improving = 50
max_epochs_wo_lr_change = 7
max_time = 5.5
acc_data = {'acc_train':[], 'acc_val':[]}
batch_acc_data = []
batch_lr_data = []
start_time = datetime.datetime.now()
with tf.Session() as sess:
init.run()
max_val_acc = 0
epochs_wo_improvement = 0
epochs_since_lr_update = 0
gap = 1
batch_lr = lr_high
y_batch = np.ones(shape=[batch_size])
for epoch in range(n_epochs):
n_batch = 0
if epoch == 15:
batch_size = 1000
y_batch = np.ones(shape=[batch_size])
max_epochs_without_improving = 100
max_epochs_wo_lr_change = 10
if epoch == 25:
batch_size = 2000
y_batch = np.ones(shape=[batch_size])
if epoch == 40:
batch_size = 4000
y_batch = np.ones(shape=[batch_size])
max_epochs_wo_lr_change = 15
if epoch == 60:
batch_size = 8000
y_batch = np.ones(shape=[batch_size])
max_epochs_wo_lr_change = 20
for X1_batch, X2_batch, X3_batch in get_comp_batch(inds=train_comp_inds, batch_size=batch_size):
feed_dict = {X1:X1_batch, X2:X2_batch, X3:X3_batch, y:y_batch}
batch_lr_data.append(batch_lr)
batch_acc_data.append(sess.run(accuracy, feed_dict=feed_dict))
feed_dict = {X1:X1_batch, X2:X2_batch, X3:X3_batch, y:y_batch, training:True, lr:batch_lr}
sess.run([training_op, extra_training_ops], feed_dict=feed_dict)
n_batch += 1
if (epoch >= 20) and (epoch % 1 == 0):
curr_ids = np.random.permutation(train_comp_inds.shape[0])[:100000]
train_acc = get_full_set_accuracy(sess, train_comp_inds[curr_ids], batch_size=10000)
acc_data['acc_train'].append(train_acc)
print('epoch', epoch, 'time passed:',(datetime.datetime.now() - start_time), 'max lr:', batch_lr)
print('Accuracy: train:', train_acc)
if train_acc > max_val_acc:
epochs_wo_improvement = 0
epochs_since_lr_update = 0
max_val_acc = train_acc
save_path_comp = saver_comp.save(sess, '../DNN_data/dnn_state_comp.ckpt')
print('- best so far!')
else:
epochs_wo_improvement += 1
epochs_since_lr_update += 1
if (epochs_wo_improvement > max_epochs_without_improving) or ((datetime.datetime.now() - kernel_start_time).seconds/60/60 > max_time):
print('early breaking!')
break
if epochs_since_lr_update >= max_epochs_wo_lr_change:
batch_lr = batch_lr - (batch_lr - lr_high_2)*0.33
epochs_since_lr_update = 0
saver_comp.restore(sess, save_path_comp)
curr_ids = np.random.permutation(train_comp_inds.shape[0])[:100000]
train_acc = get_full_set_accuracy(sess, train_comp_inds[curr_ids], batch_size=10000)
print('Final accuracy: train:', train_acc)<split> | ticket_counts = all['Ticket'].value_counts()
all['GrSize'] = all.apply(lambda s: ticket_counts.loc[s['Ticket']], axis=1)
all['Cabin'].fillna('U', inplace=True)
all['hasCabin'] = all.apply(lambda s: 0 if s['Cabin'] == 'U' else 1, axis=1) | Titanic - Machine Learning from Disaster |
2,403,150 | def generate_val_comp_data(X, gap):
if X.shape[0] > gap:
X = X.sort_values(by=['winPlacePerc'])
group_data = X[group_cols].values
part0 = X[['matchId', 'groupId']].values[gap:,:]
part0 = np.c_[part0, X[['matchId', 'groupId']].values[:-gap,:]]
part1 = group_data[gap:,:]
part2 = group_data[:-gap,:]
part3 = X[other_cols].values[:-gap,:]
return part0, part1, part2, part3
return np.empty(shape=(0, 4)), np.empty(shape=(0, len(group_cols))), np.empty(shape=(0, len(group_cols))), np.empty(shape=(0, len(other_cols)))
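# Editorial sketch (toy array assumed): after sorting a match's groups by winPlacePerc,
# generate_val_comp_data pairs each group with the one `gap` positions below it, so the
# comparison network sees (higher-placed, lower-placed) feature pairs:
import numpy as np
toy = np.array([[10], [20], [30], [40]])  # stand-in for group_data, already sorted
gap = 1
higher, lower = toy[gap:, :], toy[:-gap, :]  # same slicing as part1/part2 above
print(higher.ravel(), lower.ravel())  # [20 30 40] vs [10 20 30]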
def get_val_batch(data, batch_size=1000, gap=1):
matchIds = data['matchId'].unique().tolist()
for i in range(int(np.ceil(len(matchIds)/batch_size))):
match_ids_batch = matchIds[i*batch_size:(i+1)*batch_size]
grouped_data = data.loc[data['matchId'].isin(match_ids_batch)].groupby('matchId').apply(lambda x: generate_val_comp_data(x, gap))
part0, part1, part2, part3 = list(map(np.vstack, zip(*grouped_data)))
yield part0, part1, part2, part3<merge> | all['Fname'] = all.Name.str.extract('^(.+?),', expand=False)
Pas_wSib = []
all_x_0 = all[(all['SibSp'] > 0)&(all['Parch'] == 0)]
name_counts_SibSp = all_x_0['Fname'].value_counts()
for label, value in name_counts_SibSp.items():
entries = all_x_0[all_x_0['Fname'] == label]
if (entries.shape[0] > 1 and (not (entries['Title'] == 'Mrs').any())) or \
(entries.shape[0] == 1 and entries['Title'].values[0] == 'Mrs'):
Pas_wSib.extend(entries['PassengerId'].values.tolist())
else:
Pas_wSib.extend(\
entries[(entries['Title'] == 'Miss')|(entries['GrSize'] == 1)]['PassengerId'].values.tolist())
Mrs_wPar = []
all_x_y = all[all['Parch'] > 0]
name_counts_Parch = all_x_y['Fname'].value_counts()
for label, value in name_counts_Parch.items():
entries = all_x_y[all_x_y['Fname'] == label]
if entries.shape[0] == 1:
if entries['Title'].values[0] == 'Mrs' and entries['Age'].values[0] <= 30:
Mrs_wPar.extend(entries['PassengerId'].values.tolist())
def get_features(row):
features = pd.Series(0, index = ['wSib','wSp','wCh','wPar'])
if row['PassengerId'] in Pas_wSib:
features['wSib'] = 1
else:
if (row['SibSp'] != 0) & (row['Parch'] == 0):
features['wSp'] = 1
else:
if ((row['Title'] == 'Mrs') & (not row['PassengerId'] in Mrs_wPar)) | \
((row['Title'] == 'Mr') & (not row['PassengerId'] == 680) &
(((row['Pclass'] == 1) & (row['Age'] >= 30)) |
((row['Pclass'] == 2) & (row['Age'] >= 25)) |
((row['Pclass'] == 3) & (row['Age'] >= 20)))):
features['wCh'] = 1
else:
features['wPar'] = 1
return features
all[['wSib','wSp','wCh','wPar']] = all.apply(lambda s: get_features(s) if s['isAlone'] == 0 else pd.Series(0, index=['wSib','wSp','wCh','wPar']), axis=1) | Titanic - Machine Learning from Disaster |
2,403,150 | def reorder_val(data, ids, y_comp_pred):
df = pd.DataFrame(np.c_[ids, y_comp_pred.reshape(-1,1)], columns=['matchId', 'groupId', 'ordered'])
data = data.merge(df, on=['matchId', 'groupId'], how='left')
data['ordered'].fillna(1, inplace=True)
data.sort_values(by=['matchId','winPlacePerc'], inplace=True)
zero_inds = np.nonzero(data['ordered'].values == 0)[0]
i = 0
old_order = []
new_order = []
wPP_col_num = data.columns.get_loc('winPlacePerc')
while i < len(zero_inds):
start = zero_inds[i]
while (i+1 < len(zero_inds)) and (zero_inds[i+1] - zero_inds[i] == 1):
i += 1
end = zero_inds[i]
length = end - start + 1
i += 1
old_order.extend([i for i in range(start-1, start+length, 1)])
new_order.extend([i for i in range(start+length-1, start-2, -1)])
data.iloc[old_order, wPP_col_num] = data.iloc[new_order, wPP_col_num].values
print('rows swapped')
data.drop(columns=['ordered'], inplace=True)
return data<define_variables> | all = all.drop(['Fname','Name','Cabin','Ticket','Fare','SibSp','Parch'], axis=1) | Titanic - Machine Learning from Disaster |
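# Editorial worked example (toy values assumed) of reorder_val's loop: each consecutive
# run of rows flagged 0 by the comparison model is reversed together with the row
# immediately before the run, swapping their winPlacePerc values:
import numpy as np
vals = np.array([0.1, 0.2, 0.3, 0.4])                   # winPlacePerc sorted within a match
zero_inds = np.nonzero(np.array([1, 0, 0, 1]) == 0)[0]  # run: start=1, end=2
old_order, new_order = [0, 1, 2], [2, 1, 0]             # positions start-1..end mapped onto their reverse
vals[old_order] = vals[new_order]
print(vals)  # [0.3 0.2 0.1 0.4]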
2,403,150 | train = None
train_idx = None
train_ids = None
gc.collect()<data_type_conversions> | all[all['Pclass'] == 1].groupby(['Title','isAlone','wSib','wSp','wCh','wPar'])['Survived'].agg(['count','size','mean']) | Titanic - Machine Learning from Disaster |
2,403,150 | test, test_ids = pipeline.transform(test_path)
for num, col in enumerate(test.columns):
if col not in ['winPlacePerc', 'groupId', 'matchId']:
test[col] = test[col].astype(np.float32)
test[col] = ((test[col] - mean_vals[col])/std_vals[col]).astype(np.float32)
gc.collect()<prepare_x_and_y> | all[(all['Pclass'] == 1)&(all['Title'] == 'Mr')].groupby(['hasCabin','isAlone','wSib','wSp','wCh','wPar'])['Survived'].agg(['count','size','mean']) | Titanic - Machine Learning from Disaster |
2,403,150 | y_test_dnn = pd.DataFrame(np.zeros(shape=[1,3]), columns=['matchId', 'groupId', 'winPlacePerc_pred'])
predict_batch_size = 10000
tf.reset_default_graph()
saver = tf.train.import_meta_graph('../DNN_data/dnn_state.ckpt.meta')
X = tf.get_default_graph().get_tensor_by_name('X:0')
output = tf.get_default_graph().get_tensor_by_name('layers/output/BiasAdd:0')
init = tf.global_variables_initializer()
with tf.Session() as sess:
init.run()
saver.restore(sess, '../DNN_data/dnn_state.ckpt')
n_batches = int(np.ceil(test.shape[0]/predict_batch_size))
for batch_n in range(n_batches):
data_batch = test.iloc[batch_n*predict_batch_size:(batch_n+1)*predict_batch_size]
X_batch = data_batch.drop(columns=['matchId','groupId'])
y_batch = data_batch[['matchId','groupId']]
y_batch['winPlacePerc_pred'] = output.eval(session=sess, feed_dict={X: X_batch})
y_test_dnn = y_test_dnn.append(y_batch, ignore_index=True)
y_test_dnn = y_test_dnn.iloc[1:,:]<categorify> | all[all['Pclass'] == 2].groupby(['Title','isAlone','wSib','wSp','wCh','wPar'])['Survived'].agg(['count','size','mean']) | Titanic - Machine Learning from Disaster |
2,403,150 | def rank_align_predictions(X, y, scaled=False):
X['winPlacePerc'] = y
X['rank'] = X.groupby(['matchId'])['winPlacePerc'].rank(method='dense')
X['max_rank'] = X.groupby(['matchId'])['rank'].transform(np.max)
adj_winPlacePerc = (X['rank'] - 1)/(X['max_rank'] - 1 + 0.0000000001)
X.drop(columns=['winPlacePerc', 'rank', 'max_rank'], inplace=True)
return adj_winPlacePerc
def fix_predictions(X, y, scaled=False):
y = y.copy()
y[y > 1.0] = 1.0
y[y < 0.0] = 0.0
if scaled:
max_places = X['maxPlace'].values*std_vals['maxPlace'] + mean_vals['maxPlace']
num_groups = X['numGroups'].values*std_vals['numGroups'] + mean_vals['numGroups']
else:
max_places = X['maxPlace'].values
num_groups = X['numGroups'].values
multiplier = (max_places[max_places > 1] - 1).astype(np.float32)
y[max_places > 1] = np.round(y[max_places > 1] * multiplier) / multiplier
y[max_places == 1] = 1.0
y[max_places <= 0] = 0.0
y[num_groups <= 1] = 0.0
return y<merge> | all[all['Pclass'] == 3].groupby(['Title','isAlone','wSib','wSp','wCh','wPar'])['Survived'].agg(['count','size','mean']) | Titanic - Machine Learning from Disaster |
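# Editorial worked example (values assumed) of the grid snapping inside fix_predictions:
# with maxPlace = 10 the only attainable winPlacePerc values are multiples of 1/9, so
# raw predictions get rounded onto that grid:
import numpy as np
y_raw = np.array([0.52, 0.80])
multiplier = 10 - 1  # maxPlace - 1
print(np.round(y_raw * multiplier) / multiplier)  # [0.55555556 0.77777778], i.e. 5/9 and 7/9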
2,403,150 | y_test_dnn['winPlacePerc_pred'] = rank_align_predictions(y_test_dnn.drop(columns=['winPlacePerc_pred']), y_test_dnn['winPlacePerc_pred'])
y_test_dnn = y_test_dnn.merge(test[['matchId', 'groupId', 'numGroups', 'maxPlace']], on=['matchId', 'groupId'])
y_test_dnn['winPlacePerc_pred'] = fix_predictions(y_test_dnn.drop(columns=['winPlacePerc_pred']), y_test_dnn['winPlacePerc_pred'], scaled=True)<merge> | all[(all['Pclass'] == 3)&(all['Title'] != 'Mr')].groupby(['Title','FamSize'])['Survived'].agg(['count','size','mean']) | Titanic - Machine Learning from Disaster |
2,403,150 | test = test.merge(y_test_dnn[['matchId', 'groupId', 'winPlacePerc_pred']], on=['matchId', 'groupId'])
test.rename(columns={'winPlacePerc_pred':'winPlacePerc'}, inplace=True)<prepare_x_and_y> | all[(all['Pclass'] == 3)&(all['Title'] != 'Mr')].groupby(['Title','FamSizeBin','isAlone','wSib','wSp','wCh','wPar'])['Survived'].agg(['count','size','mean']) | Titanic - Machine Learning from Disaster |
2,403,150 | n_epochs = 6
batch_size = 10000
comp_thresh = [-0.1, 0, -0.1]
y_comp_diff_data = {}
y_comp_pred_data = {}
tf.reset_default_graph()
saver_comp = tf.train.import_meta_graph('../DNN_data/dnn_state_comp.ckpt.meta')
X1 = tf.get_default_graph().get_tensor_by_name('X1:0')
X2 = tf.get_default_graph().get_tensor_by_name('X2:0')
X3 = tf.get_default_graph().get_tensor_by_name('X3:0')
output = tf.get_default_graph().get_tensor_by_name('layers/output:0')
init = tf.global_variables_initializer()
with tf.Session() as sess:
saver_comp.restore(sess, save_path_comp)
for epoch in range(n_epochs):
y_comp_pred = np.empty(shape=[0])
y_comp_diff = np.empty(shape=[0])
all_ids = np.empty(shape=[0, 2])
for batch_ids, X1_batch, X2_batch, X3_batch in get_val_batch(test, batch_size):
feed_dict = {X1:X1_batch, X2:X2_batch, X3:X3_batch}
batch_output = output.eval(feed_dict=feed_dict).reshape(-1)
assert len(batch_output) % 2 == 0
y_batch_pred = batch_output[:int(len(batch_output)/2)]
y_batch_comp = batch_output[:int(len(batch_output)/2)] - batch_output[int(len(batch_output)/2):]
y_comp_pred = np.r_[y_comp_pred, y_batch_pred]
y_comp_diff = np.r_[y_comp_diff, y_batch_comp]
all_ids = np.r_[all_ids, batch_ids[:,:2]]
y_comp_pred_data[epoch] = y_comp_pred
y_comp_diff_data[epoch] = y_comp_diff
thresh_ind = np.minimum(len(comp_thresh)-1, epoch)
y_comp_diff = y_comp_diff > comp_thresh[thresh_ind]
print('epoch:', epoch, 'number of zeros:', len(np.nonzero(y_comp_diff == 0)[0]))
if np.min(y_comp_diff) == 0:
test = reorder_val(test, all_ids, y_comp_diff)
else:
break<merge> | def get_survived_1(row):
if row['Pclass'] in [1,2]:
if row['Title'] == 'Mr':
survived = 0
else:
survived = 1
else:
if row['Title'] == 'Mr' or row['FamSizeBin'] == 1:
survived = 0
else:
survived = 1
return survived | Titanic - Machine Learning from Disaster |
2,403,150 | def create_submission_table(X, y, id_table):
out = X[['matchId', 'groupId']]
out['winPlacePerc'] = y
out = id_table.merge(out, on=['matchId', 'groupId'])
out = out.drop(columns=['groupId', 'matchId'])
return out<save_to_csv> | X_train = all.iloc[:891,:]
X_test = all.iloc[891:,:]
y_train = all.iloc[:891,:]['Survived']
y_train_hat = X_train.apply(lambda s: get_survived_1(s), axis = 1)
predictions = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': 0})
predictions['Survived'] = X_test.apply(lambda s: get_survived_1(s), axis = 1)
predictions.to_csv('submission-1.csv', index=False)
score = metrics.accuracy_score(y_train_hat, y_train)
print('Train Accuracy: {}'.format(score)) | Titanic - Machine Learning from Disaster |
2,403,150 | submission = create_submission_table(test, test['winPlacePerc'], test_ids)
submission.to_csv('submission.csv', index=False)
submission.head(50)<import_modules> | all[(all['Pclass'] == 3)&(all['Title'] != 'Mr')&(all['FamSizeBin'] == 0)].groupby(['Title','Embarked'])['Survived'].agg(['count','size','mean']) | Titanic - Machine Learning from Disaster |
2,403,150 | import gc
import sys
import numpy as np
import pandas as pd<categorify> | def get_survived_2(row):
if row['Pclass'] in [1,2]:
if row['Title'] == 'Mr':
survived = 0
else:
survived = 1
else:
if row['Title'] == 'Mr' or row['FamSizeBin'] == 1 or(row['Title'] == 'Miss' and row['Embarked'] == 'S'):
survived = 0
else:
survived = 1
return survived | Titanic - Machine Learning from Disaster |
2,403,150 | def df_footprint_reduce(df, skip_obj=False, skip_int=False, skip_float=False, print_comparison=True):
if print_comparison:
print(f"Dataframe size before shrinking column types into smallest possible: {round(sys.getsizeof(df)/1024/1024, 4)} MB")
for column in df.columns:
if (skip_obj is False) and (str(df[column].dtype)[:6] == 'object'):
num_unique_values = len(df[column].unique())
num_total_values = len(df[column])
if num_unique_values / num_total_values < 0.5:
df.loc[:, column] = df[column].astype('category')
else:
df.loc[:, column] = df[column]
elif (skip_int is False) and (str(df[column].dtype)[:3] == 'int'):
if df[column].min() > np.iinfo(np.int8).min and df[column].max() < np.iinfo(np.int8).max:
df[column] = df[column].astype(np.int8)
elif df[column].min() > np.iinfo(np.int16).min and df[column].max() < np.iinfo(np.int16).max:
df[column] = df[column].astype(np.int16)
elif df[column].min() > np.iinfo(np.int32).min and df[column].max() < np.iinfo(np.int32).max:
df[column] = df[column].astype(np.int32)
elif (skip_float is False) and (str(df[column].dtype)[:5] == 'float'):
if df[column].min() > np.finfo(np.float16).min and df[column].max() < np.finfo(np.float16).max:
df[column] = df[column].astype(np.float16)
elif df[column].min() > np.finfo(np.float32).min and df[column].max() < np.finfo(np.float32).max:
df[column] = df[column].astype(np.float32)
if print_comparison:
print(f"Dataframe size after shrinking column types into smallest possible: {round(sys.getsizeof(df)/1024/1024, 4)} MB")
return df<feature_engineering> | y_train_hat = X_train.apply(lambda s: get_survived_2(s), axis=1)
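# Editorial usage sketch of df_footprint_reduce on a hypothetical toy frame - columns
# whose values fit a smaller dtype should downcast to int8/float16 here:
import numpy as np
import pandas as pd
toy = pd.DataFrame({'a': np.arange(100, dtype=np.int64), 'b': np.linspace(0.0, 1.0, 100)})
toy = df_footprint_reduce(toy)  # prints the before/after memory footprint
print(toy.dtypes)  # a -> int8, b -> float16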
predictions['Survived'] = X_test.apply(lambda s: get_survived_2(s), axis = 1)
predictions.to_csv('submission-2.csv', index=False)
score = metrics.accuracy_score(y_train_hat, y_train)
print('Train Accuracy: {}'.format(score)) | Titanic - Machine Learning from Disaster |
2,403,150 | def feature_engineering(df,is_train=True):
if is_train:
df = df[df['maxPlace'] > 1].copy()
target = 'winPlacePerc'
print('Grouping similar match types together')
df.loc[(df['matchType'] == 'solo'), 'matchType'] = 1
df.loc[(df['matchType'] == 'normal-solo'), 'matchType'] = 1
df.loc[(df['matchType'] == 'solo-fpp'), 'matchType'] = 1
df.loc[(df['matchType'] == 'normal-solo-fpp'), 'matchType'] = 1
df.loc[(df['matchType'] == 'duo'), 'matchType'] = 2
df.loc[(df['matchType'] == 'normal-duo'), 'matchType'] = 2
df.loc[(df['matchType'] == 'duo-fpp'), 'matchType'] = 2
df.loc[(df['matchType'] == 'normal-duo-fpp'), 'matchType'] = 2
df.loc[(df['matchType'] == 'squad'), 'matchType'] = 3
df.loc[(df['matchType'] == 'normal-squad'), 'matchType'] = 3
df.loc[(df['matchType'] == 'squad-fpp'), 'matchType'] = 3
df.loc[(df['matchType'] == 'normal-squad-fpp'), 'matchType'] = 3
df.loc[(df['matchType'] == 'flaretpp'), 'matchType'] = 0
df.loc[(df['matchType'] == 'flarefpp'), 'matchType'] = 0
df.loc[(df['matchType'] == 'crashtpp'), 'matchType'] = 0
df.loc[(df['matchType'] == 'crashfpp'), 'matchType'] = 0
df.loc[(df['rankPoints'] < 0), 'rankPoints'] = 0
print('Adding new features using existing ones')
df['headshotrate'] = df['kills']/df['headshotKills']
df['killStreakrate'] = df['killStreaks']/df['kills']
df['healthitems'] = df['heals'] + df['boosts']
df['totalDistance'] = df['rideDistance'] + df["walkDistance"] + df["swimDistance"]
df['killPlace_over_maxPlace'] = df['killPlace'] / df['maxPlace']
df['headshotKills_over_kills'] = df['headshotKills'] / df['kills']
df['distance_over_weapons'] = df['totalDistance'] / df['weaponsAcquired']
df['walkDistance_over_heals'] = df['walkDistance'] / df['heals']
df['walkDistance_over_kills'] = df['walkDistance'] / df['kills']
df['killsPerWalkDistance'] = df['kills'] / df['walkDistance']
df['skill'] = df['headshotKills'] + df['roadKills']
print('Adding normalized features')
df['playersJoined'] = df.groupby('matchId')['matchId'].transform('count')
gc.collect()
df['killsNorm'] = df['kills']*((100 - df['playersJoined'])/100 + 1)
df['damageDealtNorm'] = df['damageDealt']*((100 - df['playersJoined'])/100 + 1)
df['maxPlaceNorm'] = df['maxPlace']*((100 - df['playersJoined'])/100 + 1)
df['matchDurationNorm'] = df['matchDuration']*((100 - df['playersJoined'])/100 + 1)
df['headshotKillsNorm'] = df['headshotKills']*((100 - df['playersJoined'])/100 + 1)
df['killPlaceNorm'] = df['killPlace']*((100 - df['playersJoined'])/100 + 1)
df['killPointsNorm'] = df['killPoints']*((100 - df['playersJoined'])/100 + 1)
df['killStreaksNorm'] = df['killStreaks']*((100 - df['playersJoined'])/100 + 1)
df['longestKillNorm'] = df['longestKill']*((100 - df['playersJoined'])/100 + 1)
df['roadKillsNorm'] = df['roadKills']*((100 - df['playersJoined'])/100 + 1)
df['teamKillsNorm'] = df['teamKills']*((100 - df['playersJoined'])/100 + 1)
df['damageDealtNorm'] = df['damageDealt']*((100 - df['playersJoined'])/100 + 1)
df['DBNOsNorm'] = df['DBNOs']*((100 - df['playersJoined'])/100 + 1)
df['revivesNorm'] = df['revives']*((100 - df['playersJoined'])/100 + 1)
df = df_null_cleaner(df,fill_with=0)
features = list(df.columns)
features.remove("Id")
features.remove("matchId")
features.remove("groupId")
features.remove("matchType")
features.remove("maxPlace")
y = pd.DataFrame()
if is_train:
print('Preparing target variable')
y = df.groupby(['matchId','groupId'])[target].agg('mean')
gc.collect()
features.remove(target)
print('Aggregating means')
means_features = list(df.columns)
means_features.remove("Id")
means_features.remove("matchId")
means_features.remove("groupId")
means_features.remove("matchType")
means_features.remove("maxPlace")
means_features.remove("playersJoined")
means_features.remove("matchDuration")
means_features.remove("numGroups")
means_features.remove("teamKillsNorm")
if is_train:
means_features.remove(target)
agg = df.groupby(['matchId','groupId'])[means_features].agg('mean')
gc.collect()
agg_rank = agg.groupby('matchId')[means_features].rank(pct=True).reset_index()
gc.collect()
if is_train:
X = agg.reset_index()[['matchId','groupId']]
else:
X = df[['matchId','groupId']]
X = X.merge(agg.reset_index(), suffixes=["", ""], how='left', on=['matchId', 'groupId'])
X = X.merge(agg_rank, suffixes=["_mean", "_mean_rank"], how='left', on=['matchId', 'groupId'])
del agg, agg_rank
gc.collect()
print('Aggregating maxes')
maxes_features = list(df.columns)
maxes_features.remove("Id")
maxes_features.remove("matchId")
maxes_features.remove("groupId")
maxes_features.remove("matchType")
maxes_features.remove("DBNOsNorm")
maxes_features.remove("damageDealtNorm")
maxes_features.remove("headshotKillsNorm")
maxes_features.remove("killPlaceNorm")
maxes_features.remove("killPlace_over_maxPlace")
maxes_features.remove("killPointsNorm")
maxes_features.remove("killStreaksNorm")
maxes_features.remove("killsNorm")
maxes_features.remove("longestKillNorm")
maxes_features.remove("matchDurationNorm")
maxes_features.remove("matchDuration")
maxes_features.remove("maxPlaceNorm")
maxes_features.remove("maxPlace")
maxes_features.remove("numGroups")
maxes_features.remove("playersJoined")
maxes_features.remove("revivesNorm")
maxes_features.remove("roadKillsNorm")
maxes_features.remove("teamKillsNorm")
if is_train:
maxes_features.remove(target)
agg = df.groupby(['matchId','groupId'])[maxes_features].agg('max')
gc.collect()
agg_rank = agg.groupby('matchId')[maxes_features].rank(pct=True).reset_index()
gc.collect()
X = X.merge(agg.reset_index(), suffixes=["", ""], how='left', on=['matchId', 'groupId'])
X = X.merge(agg_rank, suffixes=["_max", "_max_rank"], how='left', on=['matchId', 'groupId'])
del agg, agg_rank
gc.collect()
print('Aggregating mins')
mins_features = list(df.columns)
mins_features.remove("Id")
mins_features.remove("matchId")
mins_features.remove("groupId")
mins_features.remove("matchType")
mins_features.remove("DBNOsNorm")
mins_features.remove("damageDealtNorm")
mins_features.remove("headshotKillsNorm")
mins_features.remove("killPlaceNorm")
mins_features.remove("killPlace_over_maxPlace")
mins_features.remove("killPointsNorm")
mins_features.remove("killStreaksNorm")
mins_features.remove("killsNorm")
mins_features.remove("longestKillNorm")
mins_features.remove("matchDurationNorm")
mins_features.remove("matchDuration")
mins_features.remove("maxPlaceNorm")
mins_features.remove("maxPlace")
mins_features.remove("numGroups")
mins_features.remove("playersJoined")
mins_features.remove("revivesNorm")
mins_features.remove("roadKillsNorm")
mins_features.remove("teamKillsNorm")
if is_train:
mins_features.remove(target)
agg = df.groupby(['matchId','groupId'])[mins_features].agg('min')
gc.collect()
agg_rank = agg.groupby('matchId')[mins_features].rank(pct=True).reset_index()
gc.collect()
X = X.merge(agg.reset_index(), suffixes=["", ""], how='left', on=['matchId', 'groupId'])
X = X.merge(agg_rank, suffixes=["_min", "_min_rank"], how='left', on=['matchId', 'groupId'])
del agg, agg_rank
gc.collect()
print('Aggregating group sizes')
agg = df.groupby(['matchId','groupId']).size().reset_index(name='group_size')
gc.collect()
X = X.merge(agg, how='left', on=['matchId', 'groupId'])
print('Aggregating match means')
agg = df.groupby(['matchId'])[features].agg('mean').reset_index()
gc.collect()
X = X.merge(agg, suffixes=["", "_match_mean"], how='left', on=['matchId'])
print('Aggregating match sizes')
agg = df.groupby(['matchId']).size().reset_index(name='match_size')
gc.collect()
X = X.merge(agg, how='left', on=['matchId'])
del df, agg
gc.collect()
X.drop(columns=['matchId', 'groupId'], inplace=True)
gc.collect()
if is_train:
return X, y
return X<load_from_csv> | all[(all['Pclass'] == 3)&(all['Title'] == 'Miss')&(all['FamSizeBin'] == 0)].groupby(['Title','wPar','Embarked'])['Survived'].agg(['count','size','mean']) | Titanic - Machine Learning from Disaster |
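# Editorial sketch (toy columns assumed) of the aggregate-then-rank pattern that
# feature_engineering repeats for mean, max and min: compute a per-group statistic,
# then convert it to a percentile rank within its match:
import pandas as pd
toy = pd.DataFrame({'matchId': [0, 0, 0], 'groupId': [0, 0, 1], 'kills': [1, 3, 5]})
agg = toy.groupby(['matchId', 'groupId'])['kills'].agg('mean')  # per-group statistic
agg_rank = agg.groupby('matchId').rank(pct=True)                # percentile within the match
print(list(agg.values), list(agg_rank.values))  # [2.0, 5.0] and [0.5, 1.0]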
2,403,150 | X_train = pd.read_csv('../input/train_V2.csv', engine='c')<concatenate> | def get_survived_3(row):
if row['Pclass'] in [1,2]:
if row['Title'] == 'Mr':
survived = 0
else:
survived = 1
else:
if row['Title'] == 'Mr' or row['FamSizeBin'] == 1 or \
(row['Title'] == 'Miss' and row['Embarked'] == 'S' and row['wPar'] == 0):
survived = 0
else:
survived = 1
return survived | Titanic - Machine Learning from Disaster |
2,403,150 | X_train = df_footprint_reduce(X_train, skip_obj=True)
gc.collect()<feature_engineering> | y_train_hat = X_train.apply(lambda s: get_survived_3(s), axis = 1)
predictions['Survived'] = X_test.apply(lambda s: get_survived_3(s), axis = 1)
predictions.to_csv('submission-3.csv', index=False)
score = metrics.accuracy_score(y_train_hat, y_train)
print('Train Accuracy: {}'.format(score)) | Titanic - Machine Learning from Disaster |
2,403,150 | X_train, y_train = feature_engineering(X_train, True)
gc.collect()<concatenate> | all['Sex'] = all['Sex'].map({'male': 0, 'female': 1}).astype(int)
all['Embarked'].fillna(all['Embarked'].value_counts().index[0], inplace=True)
all_dummies = pd.get_dummies(all, columns=['Title','Pclass','Embarked'],\
prefix=['Title','Pclass','Embarked'], drop_first=True)
all_dummies = all_dummies.drop(['PassengerId','Survived'], axis=1) | Titanic - Machine Learning from Disaster |
2,403,150 | X_train = df_footprint_reduce(X_train, skip_obj=True)
gc.collect()<import_modules> | all_dummies_i = pd.DataFrame(data=KNN(k=3, verbose=False).fit_transform(all_dummies).astype(int),
columns=all_dummies.columns, index=all_dummies.index) | Titanic - Machine Learning from Disaster |
2,403,150 | from sklearn.model_selection import train_test_split, GridSearchCV<split> | all_dummies_i['isAlwSib'] = all_dummies_i.apply(lambda s: 1 if (s['isAlone'] == 1) | (s['wSib'] == 1) else 0, axis=1)
all_dummies_i = all_dummies_i.drop(['isAlone','wSib','Sex','GrSize'], axis=1) | Titanic - Machine Learning from Disaster |
2,403,150 | X_train, X_validation, y_train, y_validation = train_test_split(X_train,
y_train,
test_size=0.2)
gc.collect()<import_modules> | X_train = all_dummies_i.iloc[:891,:]
X_test = all_dummies_i.iloc[891:,:] | Titanic - Machine Learning from Disaster |
2,403,150 | import lightgbm as lgb<train_model> | scaler = StandardScaler()
scaler.fit(X_train[['Age']])
X_train['Age'] = scaler.transform(X_train[['Age']])
X_test['Age'] = scaler.transform(X_test[['Age']]) | Titanic - Machine Learning from Disaster |
2,403,150 | parameters = { 'objective': 'regression_l1',
'learning_rate': 0.01
}<train_on_grid> | cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=10, random_state=1) | Titanic - Machine Learning from Disaster |
2,403,150 | def find_best_hyperparameters(model):
gridParams = {
'learning_rate' : [0.1, 0.01 , 0.05],
'n_estimators' : [1000, 10000, 20000],
'bagging_fraction' : [0.5, 0.6 ,0.7],
'feature_fraction' : [0.5, 0.6 ,0.7],
'num_leaves' : [31, 80, 140]
}
grid = GridSearchCV(model,
gridParams,
verbose=5,
cv=3)
grid.fit(X_train, y_train)
print('Best parameters: %s' % grid.best_params_)
print('Accuracy: %.2f' % grid.best_score_)
return<create_dataframe> | svm_grid = {'C': [10,11,12,13,14,15,16,17,18,19,20], 'gamma': ['auto']}
svm_search = GridSearchCV(estimator=SVC(), param_grid=svm_grid, cv=cv, refit=True, n_jobs=1) | Titanic - Machine Learning from Disaster |
2,403,150 | X_train = lgb.Dataset(X_train, label=y_train)
X_validation = lgb.Dataset(X_validation, label=y_validation)
gc.collect()<train_model> | svm_search.fit(X_train, train['Survived'])
svm_best = svm_search.best_estimator_
print("Cross-validation accuracy: {}, standard deviation: {}, with parameters {}"
.format(svm_search.best_score_, svm_search.cv_results_['std_test_score'][svm_search.best_index_],
svm_search.best_params_)) | Titanic - Machine Learning from Disaster |
2,403,150 | %%time
model = lgb.train(parameters,
X_train,
num_boost_round = 40000,
valid_sets=[X_validation, X_train])<compute_test_metric> | y_train_hat = svm_best.predict(X_train)
print('Train Accuracy: {}'
.format(metrics.accuracy_score(y_train_hat, y_train)))
predictions['Survived'] = svm_best.predict(X_test)
predictions.to_csv('submission-svm.csv', index=False) | Titanic - Machine Learning from Disaster |
2,403,150 | <import_modules><EOS> | def get_survived_svm_rule(row):
if row['Pclass'] in [1,2]:
if row['Title'] == 'Mr':
survived = 0
else:
survived = 1
else:
if row['Title'] == 'Mr' or row['FamSizeBin'] == 1 or \
(row['Title'] == 'Miss' and row['Embarked'] == 'S' and row['Age'] >= 18):
survived = 0
else:
survived = 1
return survived | Titanic - Machine Learning from Disaster |
6,786,557 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<filter> | warnings.filterwarnings("ignore") | Titanic - Machine Learning from Disaster |
6,786,557 | feature_imp[feature_imp['Value']==0]<drop_column> | warnings.filterwarnings("ignore") | Titanic - Machine Learning from Disaster |
6,786,557 | del X_train, X_validation, y_train, y_validation, feature_imp
gc.collect()<load_from_csv> | gender_submission = pd.read_csv("../input/titanic/gender_submission.csv")
test = pd.read_csv("../input/titanic/test.csv")
train = pd.read_csv("../input/titanic/train.csv")
test["Survived"] = np.nan | Titanic - Machine Learning from Disaster |
6,786,557 | test_x = pd.read_csv('../input/test_V2.csv', engine='c')<concatenate> | dataset = pd.concat([train, test], axis=0).reset_index(drop=True)
dataset = dataset.fillna(np.nan) | Titanic - Machine Learning from Disaster |
6,786,557 | test_x = df_footprint_reduce(test_x, skip_obj=True)
gc.collect()<feature_engineering> | dataset.isnull().sum(axis=0) | Titanic - Machine Learning from Disaster |
6,786,557 | test_x = feature_engineering(test_x, False)
gc.collect()<predict_on_test> | dataset["Family"] = dataset["SibSp"] + dataset["Parch"] + 1
train["Family"] = train["SibSp"] + train["Parch"] + 1
test["Family"] = test["SibSp"] + test["Parch"] + 1 | Titanic - Machine Learning from Disaster |
6,786,557 | pred_test = model.predict(test_x, num_iteration=model.best_iteration)
del test_x
gc.collect()<load_from_csv> | dataset = dataset.drop(columns=["SibSp","Parch"])
train = train.drop(columns=["SibSp","Parch"])
test = test.drop(columns=["SibSp","Parch"] ) | Titanic - Machine Learning from Disaster |
6,786,557 | test_set = pd.read_csv('../input/test_V2.csv', engine='c')<merge> | dataset.Family = list(map(lambda x: 'Big' if x > 4 else ('Single' if x == 1 else 'Medium'), dataset.Family))
train.Family = list(map(lambda x: 'Big' if x > 4 else ('Single' if x == 1 else 'Medium'), train.Family))
test.Family = list(map(lambda x: 'Big' if x > 4 else ('Single' if x == 1 else 'Medium'), test.Family)) | Titanic - Machine Learning from Disaster |
6,786,557 | submission = pd.read_csv(".. /input/sample_submission_V2.csv")
submission['winPlacePerc'] = pred_test
submission.loc[submission.winPlacePerc < 0, "winPlacePerc"] = 0
submission.loc[submission.winPlacePerc > 1, "winPlacePerc"] = 1
submission = submission.merge(test_set[["Id", "matchId", "groupId", "maxPlace", "numGroups"]], on="Id", how="left")
submission_group = submission.groupby(["matchId", "groupId"]).first().reset_index()
submission_group["rank"] = submission_group.groupby(["matchId"])["winPlacePerc"].rank()
submission_group = submission_group.merge(
submission_group.groupby("matchId")["rank"].max().to_frame("max_rank").reset_index(),
on="matchId", how="left")
submission_group["adjusted_perc"] = (submission_group["rank"] - 1)/(submission_group["numGroups"] - 1)
submission = submission.merge(submission_group[["adjusted_perc", "matchId", "groupId"]], on=["matchId", "groupId"], how="left")
submission["winPlacePerc"] = submission["adjusted_perc"]
submission.loc[submission.maxPlace == 0, "winPlacePerc"] = 0
submission.loc[submission.maxPlace == 1, "winPlacePerc"] = 1
subset = submission.loc[submission.maxPlace > 1]
gap = 1.0 / (subset.maxPlace.values - 1)
new_perc = np.around(subset.winPlacePerc.values / gap) * gap
submission.loc[submission.maxPlace > 1, "winPlacePerc"] = new_perc
submission.loc[(submission.maxPlace > 1) & (submission.numGroups == 1), "winPlacePerc"] = 0
assert submission["winPlacePerc"].isnull().sum() == 0
submission[["Id", "winPlacePerc"]].to_csv("submission.csv", index=False)<import_modules> | dataset.Sex = dataset.Sex.map({'male': 0, 'female': 1})
train.Sex = train.Sex.map({'male': 0, 'female': 1})
test.Sex = test.Sex.map({'male': 0, 'female': 1}) | Titanic - Machine Learning from Disaster |
6,786,557 | import numpy as np
import pandas as pd
import tqdm
import matplotlib.pyplot as plt
import keras
from keras.layers.core import Dense
from keras.layers.normalization import BatchNormalization
from sklearn.preprocessing import MinMaxScaler, RobustScaler, MaxAbsScaler<load_from_csv> | dataset["Fare"] = dataset["Fare"].fillna(dataset["Fare"].median())
train["Fare"] = train["Fare"].fillna(dataset["Fare"].median())
test["Fare"] = test["Fare"].fillna(dataset["Fare"].median() ) | Titanic - Machine Learning from Disaster |
6,786,557 | train = pd.read_csv(".. /input/train_V2.csv" )<create_dataframe> | dataset.Fare = list(map(lambda x: 'Very Low' if x <= 10
else('Low' if(x > 10 and x < 26)
else('Medium' if(x >= 26 and x <= 50)else 'High')) , dataset.Fare))
train.Fare = list(map(lambda x: 'Very Low' if x <= 10
else('Low' if(x > 10 and x < 26)
else('Medium' if(x >= 26 and x <= 50)else 'High')) , train.Fare))
test.Fare = list(map(lambda x: 'Very Low' if x <= 10
else('Low' if(x > 10 and x < 26)
else('Medium' if(x >= 26 and x <= 50)else 'High')) , test.Fare)) | Titanic - Machine Learning from Disaster |
6,786,557 | pd.DataFrame(train.dtypes, columns=["Type"] )<count_unique_values> | dataset.Embarked = dataset.Embarked.fillna('S')
train.Embarked = train.Embarked.fillna('S')
test.Embarked = test.Embarked.fillna('S') | Titanic - Machine Learning from Disaster |
6,786,557 | print("Number of record:", len(train), "
Number of Unique Id:", len(pd.unique(train.Id)) )<count_unique_values> | title = []
for i in dataset.Name.str.split(', '):
title.append(i[1].split('.')[0])
dataset["Title"] = title
title = []
for i in train.Name.str.split(', '):
title.append(i[1].split('.')[0])
train["Title"] = title
title = []
for i in test.Name.str.split(', '):
title.append(i[1].split('.')[0])
test["Title"] = title | Titanic - Machine Learning from Disaster |
6,786,557 | print("Number of match: ", len(pd.unique(train.matchId)) , "
Number of match(<9): ", sum(train.groupby("matchId" ).size() < 9))<groupby> | dataset = dataset.drop(columns=["Name"])
train = train.drop(columns=["Name"])
test = test.drop(columns=["Name"] ) | Titanic - Machine Learning from Disaster |
temp = train.loc[train.matchId.isin(train.groupby("matchId").size()[train.groupby("matchId").size() < 9].index), :]
temp.loc[temp.matchId == "e263f4a227313a"]<create_dataframe> | cabin = []
for i in dataset.Cabin:
if type(i) != float:
cabin.append(i[0])
else:
cabin.append('Z')
dataset.Cabin = cabin
cabin = []
for i in train.Cabin:
if type(i) != float:
cabin.append(i[0])
else:
cabin.append('Z')
train.Cabin = cabin
cabin = []
for i in test.Cabin:
if type(i) != float:
cabin.append(i[0])
else:
cabin.append('Z')
test.Cabin = cabin | Titanic - Machine Learning from Disaster |
6,786,557 | temp = pd.DataFrame(train.groupby("matchId").size(), columns=["player"])
temp.reset_index(level=0, inplace=True)<merge> | dataset.Cabin = dataset.Cabin.map({'B':'BCDE','C':'BCDE','D':'BCDE','E':'BCDE','A':'AFG','F':'AFG','G':'AFG','Z':'Z','T':'Z'})
train.Cabin = train.Cabin.map({'B':'BCDE','C':'BCDE','D':'BCDE','E':'BCDE','A':'AFG','F':'AFG','G':'AFG','Z':'Z','T':'Z'})
test.Cabin = test.Cabin.map({'B':'BCDE','C':'BCDE','D':'BCDE','E':'BCDE','A':'AFG','F':'AFG','G':'AFG','Z':'Z','T':'Z'}) | Titanic - Machine Learning from Disaster |
6,786,557 | train = train.merge(temp, left_on="matchId", right_on="matchId")<count_unique_values> | tickets = []
for i in dataset.Ticket:
tickets.append(i.split(' ')[-1][0])
dataset.Ticket = tickets
tickets = []
for i in train.Ticket:
tickets.append(i.split(' ')[-1][0])
train.Ticket = tickets
tickets = []
for i in test.Ticket:
tickets.append(i.split(' ')[-1][0])
test.Ticket = tickets | Titanic - Machine Learning from Disaster |
6,786,557 | print("Type: ", pd.unique(train.matchType), "
Count: ", len(pd.unique(train.matchType)) )<feature_engineering> | dataset.Ticket = list(map(lambda x: 4 if(x == 'L' or int(x)>= 4)else int(x), dataset.Ticket))
train.Ticket = list(map(lambda x: 4 if(x == 'L' or int(x)>= 4)else int(x), train.Ticket))
test.Ticket = list(map(lambda x: 4 if(x == 'L' or int(x)>= 4)else int(x), test.Ticket)) | Titanic - Machine Learning from Disaster |
6,786,557 | train["matchType_1"] = "-"
train.loc[(train.matchType == "solo-fpp")|
(train.matchType == "solo")|
(train.matchType == "normal-solo-fpp")|
(train.matchType == "normal-solo"), "matchType_1"] = "solo"
train.loc[(train.matchType == "duo-fpp")|
(train.matchType == "duo")|
(train.matchType == "normal-duo-fpp")|
(train.matchType == "normal-duo"), "matchType_1"] = "duo"
train.loc[(train.matchType == "squad-fpp")|
(train.matchType == "squad")|
(train.matchType == "normal-squad-fpp")|
(train.matchType == "normal-squad"), "matchType_1"] = "squad"
train.loc[(train.matchType == "flarefpp")|
(train.matchType == "flaretpp")|
(train.matchType == "crashfpp")|
(train.matchType == "crashtpp"), "matchType_1"] = "etc"<feature_engineering> | medians = pd.DataFrame(dataset.groupby(['Pclass', 'Title'])['Age'].median())
medians | Titanic - Machine Learning from Disaster |
6,786,557 | train["matchType_2"] = "-"
train.loc[(train.matchType == "solo-fpp")|
(train.matchType == "duo-fpp")|
(train.matchType == "squad-fpp")|
(train.matchType == "normal-solo-fpp")|
(train.matchType == "normal-duo-fpp")|
(train.matchType == "normal-squad-fpp")|
(train.matchType == "crashfpp")|
(train.matchType == "flarefpp"), "matchType_2"] = "fpp"
train.loc[(train.matchType == "solo")|
(train.matchType == "duo")|
(train.matchType == "squad")|
(train.matchType == "normal-solo")|
(train.matchType == "normal-duo")|
(train.matchType == "normal-squad")|
(train.matchType == "crashtpp")|
(train.matchType == "flaretpp"), "matchType_2"] = "tpp"<feature_engineering> | ages = []
for i in dataset[dataset.Age.isnull()][["Pclass","Title"]].values:
ages.append(medians.loc[(i[0], i[1])].Age)
dataset.loc[dataset.Age.isnull(), 'Age'] = ages | Titanic - Machine Learning from Disaster |
6,786,557 | train["solo"] = 0
train["duo"] = 0
train["squad"] = 0
train["etc"] = 0
train.loc[train.matchType_1 == "solo", "solo"] = 1
train.loc[train.matchType_1 == "duo", "duo"] = 1
train.loc[train.matchType_1 == "squad", "squad"] = 1
train.loc[train.matchType_1 == "etc", "etc"] = 1<feature_engineering> | index = dataset[dataset.Age.isnull() == True].index
train_idx = index[index <= 890]
test_idx = index[index > 890]
train.loc[train.index.isin(train_idx), 'Age'] = dataset['Age'][dataset.index.isin(train_idx)].values
test.loc[test.index.isin(test_idx - 891), 'Age'] = dataset['Age'][dataset.index.isin(test_idx)].values | Titanic - Machine Learning from Disaster |
6,786,557 | train["fpp"] = 0
train["tpp"] = 0
train.loc[train.matchType_2 == "fpp", "fpp"] = 1
train.loc[train.matchType_2 == "tpp", "tpp"] = 1<filter> | ages = []
for i in dataset.Age:
if i < 18:
ages.append('less_18')
elif i >= 18 and i < 50:
ages.append('18_50')
else:
ages.append('greater_50')
dataset.Age = ages
ages = []
for i in train.Age:
if i < 18:
ages.append('less_18')
elif i >= 18 and i < 50:
ages.append('18_50')
else:
ages.append('greater_50')
train.Age = ages
ages = []
for i in test.Age:
if i < 18:
ages.append('less_18')
elif i >= 18 and i < 50:
ages.append('18_50')
else:
ages.append('greater_50')
test.Age = ages | Titanic - Machine Learning from Disaster |
6,786,557 | print(list(train.columns[train.dtypes != "O"]))<define_variables> | x_train = train.loc[:, ~train.columns.isin(['PassengerId', 'Survived', 'Sex'])]
y_train = train.Survived
x_test = test.loc[:, ~test.columns.isin(['PassengerId', 'Survived', 'Sex'])] | Titanic - Machine Learning from Disaster |
6,786,557 | feature = ["assists", "boosts", "damageDealt", "DBNOs", "headshotKills", "heals",
"killPlace", "killPoints", "kills", "killStreaks", "longestKill",
"matchDuration", "maxPlace", "rankPoints", "revives", "rideDistance",
"roadKills", "swimDistance", "teamKills", "vehicleDestroys", "walkDistance", "weaponsAcquired", "winPoints", "player"]<define_variables> | x_train = pd.get_dummies(x_train)
x_train["Sex"] = train.Sex
x_test = pd.get_dummies(x_test)
x_test["Sex"] = test.Sex | Titanic - Machine Learning from Disaster |
6,786,557 | feature_1 = ["matchId", "assists", "boosts", "damageDealt", "DBNOs", "headshotKills", "heals",
"killPlace", "killPoints", "kills", "killStreaks", "longestKill",
"revives", "rideDistance", "roadKills", "swimDistance", "teamKills",
"vehicleDestroys", "walkDistance", "weaponsAcquired", "winPoints"]<define_variables> | rf = RandomForestClassifier()
rf.fit(x_train, y_train) | Titanic - Machine Learning from Disaster |
6,786,557 | feature_2 = ["matchDuration", "maxPlace", "rankPoints", "player", "fpp", "tpp"]<count_missing_values> | ABC = AdaBoostClassifier(DecisionTreeClassifier())
ABC_param_grid = {"base_estimator__criterion" : ["gini", "entropy"],
"base_estimator__splitter" : ["best", "random"],
"algorithm" : ["SAMME","SAMME.R"],
"n_estimators" :[5,6,7,8,9,10,20],
"learning_rate": [0.001, 0.01, 0.1, 0.3]}
gsABC = GridSearchCV(ABC, param_grid = ABC_param_grid, cv = 10, scoring = "accuracy", n_jobs = 6, verbose = 1)
gsABC.fit(x_train,y_train)
ada_best = gsABC.best_estimator_
gsABC.best_score_ | Titanic - Machine Learning from Disaster |
6,786,557 | for i in list(train.columns[train.dtypes != "O"]):
print(i, ":", sum(train[i].isna()))<feature_engineering> | ExtC = ExtraTreesClassifier()
ex_param_grid = {"max_depth": [3, 4, 5],
"max_features": [3, 10, 15],
"min_samples_split": [2, 3, 4],
"min_samples_leaf": [1, 2],
"bootstrap": [False,True],
"n_estimators" :[100,200,300],
"criterion": ["gini","entropy"]}
gsExtC = GridSearchCV(ExtC, param_grid = ex_param_grid, cv = 10, scoring = "accuracy", n_jobs = 6, verbose = 1)
gsExtC.fit(x_train,y_train)
ext_best = gsExtC.best_estimator_
gsExtC.best_score_ | Titanic - Machine Learning from Disaster |
6,786,557 |
<feature_engineering> | rf_test = {"max_depth": [24,26],
"max_features": [6,8,10],
"min_samples_split": [3,4],
"min_samples_leaf": [3,4],
"bootstrap": [True],
"n_estimators" :[50,80],
"criterion": ["gini","entropy"],
"max_leaf_nodes":[26,28],
"min_impurity_decrease":[0.0],
"min_weight_fraction_leaf":[0.0]}
tuning = GridSearchCV(estimator=RandomForestClassifier(), param_grid=rf_test, scoring='accuracy', n_jobs=6, cv=10)
tuning.fit(x_train,np.ravel(y_train))
rf_best = tuning.best_estimator_
tuning.best_score_ | Titanic - Machine Learning from Disaster |
6,786,557 |
<count_missing_values> | GBM = GradientBoostingClassifier()
gb_param_grid = {'loss' : ["deviance"],
'n_estimators' : [450,460,500],
'learning_rate': [0.1,0.11],
'max_depth': [7,8],
'min_samples_leaf': [30,40],
'max_features': [0.1,0.4,0.6]}
gsGBC = GridSearchCV(GBM, param_grid = gb_param_grid, cv = 10, scoring = "accuracy", n_jobs = 6, verbose = 1)
gsGBC.fit(x_train,y_train)
gbm_best = gsGBC.best_estimator_
gsGBC.best_score_ | Titanic - Machine Learning from Disaster |
6,786,557 | np.sum(train.winPlacePerc.isna())<filter> | SVMC = SVC(probability=True)
svc_param_grid = {'kernel': ['rbf'],
'gamma': [0.027,0.029,0.03,0.031],
'C': [45,55,76,77,78,85,95,100],
'tol':[0.001,0.0008,0.0009,0.0011]}
gsSVMC = GridSearchCV(SVMC, param_grid = svc_param_grid, cv = 10, scoring = "accuracy", n_jobs = 6, verbose = 1)
gsSVMC.fit(x_train,y_train)
svm_best = gsSVMC.best_estimator_
gsSVMC.best_score_ | Titanic - Machine Learning from Disaster |
6,786,557 | train = train.loc[train.winPlacePerc.notna(), :]<train_model> | XGB = XGBClassifier()
xgb_param_grid = {'learning_rate': [0.1,0.04,0.01],
'max_depth': [5,6,7],
'n_estimators': [350,400,450,2000],
'gamma': [0,1,5,8],
'subsample': [0.8,0.95,1.0]}
gsXBC = GridSearchCV(XGB, param_grid = xgb_param_grid, cv = 10, scoring = "accuracy", n_jobs = 6, verbose = 1)
gsXBC.fit(x_train,y_train)
xgb_best = gsXBC.best_estimator_
gsXBC.best_score_ | Titanic - Machine Learning from Disaster |
6,786,557 |
<categorify> | voting = VotingClassifier(estimators=[('rfc', rf_best),
('extc', ext_best),
('svc', svm_best),
('gbc',gbm_best),
('xgbc',xgb_best),
('ada',ada_best)])
v_param_grid = {'voting':['soft',
'hard']}
gsV = GridSearchCV(voting,
param_grid =
v_param_grid,
cv = 10,
scoring = "accuracy",
n_jobs = 6,
verbose = 1)
gsV.fit(x_train,y_train)
v_best = gsV.best_estimator_
gsV.best_score_ | Titanic - Machine Learning from Disaster |
6,786,557 |
<create_dataframe> | pred = v_best.predict(x_test)
submission = pd.DataFrame(test.PassengerId)
submission["Survived"] = pd.Series(pred ) | Titanic - Machine Learning from Disaster |
6,786,557 |
<feature_engineering> | submission.to_csv("submission.csv", index=False) | Titanic - Machine Learning from Disaster |
2,817,510 |
<rename_columns> | train_df = pd.read_csv('../input/train.csv', index_col='PassengerId')
test_df = pd.read_csv('../input/test.csv', index_col='PassengerId')
train_df.head() | Titanic - Machine Learning from Disaster |
2,817,510 | train.set_index("Id", inplace=True)
train.index.name = "Id"<prepare_x_and_y> | print('Train dataset:')
print(train_df.isna().sum() [train_df.isna().any() ])
print('
Test dataset:')
print(test_df.isna().sum() [test_df.isna().any() ] ) | Titanic - Machine Learning from Disaster |
2,817,510 | temp_1 = train.loc[:, feature_1]
temp_2 = train.loc[:, feature_2]<feature_engineering> | cat_feat = ['Sex', 'Embarked']
for cf in cat_feat:
if cf != cat_feat[0]:
print()
print(train_df[cf].value_counts() / train_df[cf].count()) | Titanic - Machine Learning from Disaster |
2,817,510 | temp_1.groupby("matchId" ).transform(minmax)
for i in temp_2.columns[:4]:
temp_2[i] =(temp_2[i] - min(temp_2[i])) /(max(temp_2[i])- min(temp_2[i]))<merge> | train_df = train_df.drop(['Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Ticket', 'Cabin'], axis=1 ) | Titanic - Machine Learning from Disaster |
2,817,510 | X = pd.merge(temp_1, temp_2, on="Id")
X = pd.merge(X, train.loc[:, ["matchType_1", "winPlacePerc"]], on="Id")<feature_engineering> | extract_title = lambda df: df.Name.str.extract(r'([A-Za-z]+)\.', expand=False)
train_df['Title'] = extract_title(train_df)
test_df['Title'] = extract_title(test_df)
train_df.Title.value_counts() | Titanic - Machine Learning from Disaster |
2,817,510 |
<feature_engineering> | def replace_titles(df):
df['Title'] = df['Title'].replace(['Lady', 'Countess','Capt', 'Col',\
'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
df['Title'] = df['Title'].replace('Mlle', 'Miss')
df['Title'] = df['Title'].replace('Ms', 'Miss')
df['Title'] = df['Title'].replace('Mme', 'Mrs')
return df
train_df = replace_titles(train_df)
test_df = replace_titles(test_df)
train_df = train_df.drop('Name', axis=1)
test_df = test_df.drop('Name', axis=1 ) | Titanic - Machine Learning from Disaster |
2,817,510 |
<feature_engineering> | def add_family(df):
df['Family'] = df.SibSp + df.Parch
return df
train_df = add_family(train_df)
test_df = add_family(test_df ) | Titanic - Machine Learning from Disaster |
2,817,510 |
<filter> | family_feat = ['SibSp', 'Parch']
train_df = train_df.drop(family_feat, axis=1)
test_df = test_df.drop(family_feat, axis=1 ) | Titanic - Machine Learning from Disaster |
2,817,510 |
<count_values> | def fill_ages(df):
df.loc[(df.Age.isnull())&(df.Title == 'Master'), 'Age'] = df[df.Title == 'Master'].Age.median()
df.loc[(df.Age.isnull())&(df.Title != 'Master'), 'Age'] = df[df.Title != 'Master'].Age.median()
return df
def fill_embarked(df):
df['Embarked'] = df.Embarked.fillna('S')
return df
train_df = fill_ages(fill_embarked(train_df))
test_df = fill_ages(fill_embarked(test_df))
test_df['Fare'] = test_df.Fare.fillna(test_df.Fare.median()) | Titanic - Machine Learning from Disaster |
2,817,510 | print("Name: ", feature, "
Count: ", len(feature))<define_variables> | def add_bands(df):
df['AgeBand'] = pd.cut(df.Age, bins=5)
df['FareBand'] = pd.qcut(df.Fare, q=4)
return df
train_df = add_bands(train_df)
test_df = add_bands(test_df)
train_df = train_df.drop(['Age', 'Fare'], axis=1)
test_df = test_df.drop(['Age', 'Fare'], axis=1 ) | Titanic - Machine Learning from Disaster |
2,817,510 | list_feat = ["assists", "boosts", "damageDealt", "DBNOs", "headshotKills", "heals",
"killPlace", "killPoints", "kills", "killStreaks", "longestKill",
"matchDuration", "maxPlace", "rankPoints", "revives", "rideDistance",
"roadKills", "swimDistance", "teamKills", "vehicleDestroys", "walkDistance",
"weaponsAcquired", "winPoints", "player", "fpp", "tpp"]<define_variables> | def factorize(df):
df['Sex'] = df.Sex.factorize()[0]
df['AgeBand'] = df.AgeBand.factorize(sort=True)[0]
df['FareBand'] = df.FareBand.factorize(sort=True)[0]
return df
def one_hot_encode(df):
return pd.get_dummies(df, columns=['Embarked', 'Title'])
train_df = one_hot_encode(factorize(train_df))
test_df = one_hot_encode(factorize(test_df))
train_df.head() | Titanic - Machine Learning from Disaster |
2,817,510 | list_feat_1 = ["assists", "boosts", "damageDealt", "DBNOs", "headshotKills", "heals",
"killPlace", "killPoints", "kills", "killStreaks", "longestKill",
"matchDuration", "maxPlace", "rankPoints", "revives", "rideDistance",
"roadKills", "swimDistance", "teamKills", "vehicleDestroys", "walkDistance",
"weaponsAcquired", "winPoints", "player", "fpp", "tpp", "matchId"]<prepare_x_and_y> | X = train_df.drop('Survived', axis=1)
y = train_df.Survived | Titanic - Machine Learning from Disaster |
2,817,510 | train = X<choose_model_class> | param_grid = {
'n_estimators': [90, 95, 100],
'learning_rate': [0.009, 0.01],
'max_depth': [3],
'min_child_weight' :range(1, 3),
'gamma': [0, 0.001, 0.005]
}
gsearch = GridSearchCV(cv=5, estimator=XGBClassifier(), param_grid=param_grid, n_jobs=-1)
gsearch.fit(X, y)
gsearch.best_params_, gsearch.best_score_
| Titanic - Machine Learning from Disaster |
2,817,510 | model_1 = keras.models.Sequential()
model_1.add(Dense(32, input_dim=len(list_feat), activation="elu", kernel_initializer="he_normal"))
model_1.add(Dense(64, activation="elu", kernel_initializer="he_normal"))
model_1.add(Dense(128, activation="elu", kernel_initializer="he_normal"))
model_1.add(keras.layers.Dropout(0.25))
model_1.add(Dense(256, activation="elu", kernel_initializer="he_normal"))
model_1.add(Dense(256, activation="elu", kernel_initializer="he_normal"))
model_1.add(keras.layers.Dropout(0.25))
model_1.add(Dense(128, activation="elu", kernel_initializer="he_normal"))
model_1.add(Dense(64, activation="elu", kernel_initializer="he_normal"))
model_1.add(Dense(32, activation="elu", kernel_initializer="he_normal"))
model_1.add(keras.layers.Dropout(0.25))
model_1.add(Dense(1, activation="sigmoid"))
model_1.compile(optimizer="RMSprop", loss='MAE', metrics=["MAE"] )<prepare_x_and_y> | param_grid = {
'C': np.arange(0.99, 1.1, 0.01)
}
model = SVC(gamma='auto')
gsearch = GridSearchCV(cv=5, estimator=model, param_grid=param_grid, n_jobs=-1)
gsearch.fit(X, y)
print(f'Best params: {gsearch.best_params_}')
print(f'Best score: {gsearch.best_score_}')
| Titanic - Machine Learning from Disaster |
2,817,510 | x_train = train.loc[train.matchType_1 == "solo", list_feat]
y_train = train.loc[train.matchType_1 == "solo", ["winPlacePerc"]]<train_model> | param_grid = {
'n_estimators': range(200, 300, 50),
'max_depth': range(2, 5),
'min_samples_split': [2, 3, 4],
'bootstrap': [True, False]
}
model = RandomForestClassifier()
gsearch = GridSearchCV(cv=5, estimator=model, param_grid=param_grid, n_jobs=-1)
gsearch.fit(X, y)
print(f'Best params: {gsearch.best_params_}')
print(f'Best score: {gsearch.best_score_}' ) | Titanic - Machine Learning from Disaster |
2,817,510 | model_1.fit(x=x_train, y=y_train, epochs=50, batch_size=10000, validation_split=0.2, shuffle=True)
model_1.fit(x=x_train, y=y_train, epochs=30, batch_size=2000, validation_split=0.2, shuffle=True)<train_model> | model = SVC(C=1.26, gamma=0.09)
model.fit(X, y)
predictions = model.predict(test_df) | Titanic - Machine Learning from Disaster |
2,817,510 |
<save_model> | submit_df = pd.DataFrame({
'PassengerId': test_df.index.values,
'Survived': predictions
})
submit_df.to_csv('submission.csv', index=False) | Titanic - Machine Learning from Disaster |
3,048,663 | model_1.save("model_1_solo.h5")
<choose_model_class> | datrn = pd.read_csv('../input/train.csv')
datst = pd.read_csv('../input/test.csv') | Titanic - Machine Learning from Disaster |
3,048,663 | model_2 = keras.models.Sequential()
model_2.add(Dense(32, input_dim=len(list_feat), activation="elu", kernel_initializer="he_normal"))
model_2.add(Dense(64, activation="elu", kernel_initializer="he_normal"))
model_2.add(Dense(128, activation="elu", kernel_initializer="he_normal"))
model_2.add(keras.layers.Dropout(0.25))
model_2.add(Dense(256, activation="elu", kernel_initializer="he_normal"))
model_2.add(Dense(256, activation="elu", kernel_initializer="he_normal"))
model_2.add(keras.layers.Dropout(0.25))
model_2.add(Dense(128, activation="elu", kernel_initializer="he_normal"))
model_2.add(Dense(64, activation="elu", kernel_initializer="he_normal"))
model_2.add(Dense(32, activation="elu", kernel_initializer="he_normal"))
model_2.add(keras.layers.Dropout(0.25))
model_2.add(Dense(1, activation="sigmoid"))
model_2.compile(optimizer="RMSprop", loss='MAE', metrics=["MAE"] )<prepare_x_and_y> | datsub = pd.read_csv('.. /input/gender_submission.csv' ) | Titanic - Machine Learning from Disaster |
3,048,663 | x_train = train.loc[train.matchType_1 == "duo", list_feat]
y_train = train.loc[train.matchType_1 == "duo", ["winPlacePerc"]]<train_model> | outcome = datrn['Survived']
data = datrn.drop('Survived', axis=1) | Titanic - Machine Learning from Disaster |
3,048,663 | model_2.fit(x=x_train, y=y_train, epochs=50, batch_size=10000, validation_split=0.2, shuffle=True)
model_2.fit(x=x_train, y=y_train, epochs=40, batch_size=2000, validation_split=0.2, shuffle=True)<train_model> | def accuracy_score(truth, pred):
if len(truth) == len(pred):
return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean() * 100)
else:
return "Number of predictions does not match number of outcomes!" | Titanic - Machine Learning from Disaster |
3,048,663 |
<save_model> | warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
get_ipython().run_line_magic('matplotlib', 'inline')
def filter_data(data, condition):
field, op, value = condition.split(" ")
try:
value = float(value)
except:
value = value.strip("'"")
if op == ">":
matches = data[field] > value
elif op == "<":
matches = data[field] < value
elif op == ">=":
matches = data[field] >= value
elif op == "<=":
matches = data[field] <= value
elif op == "==":
matches = data[field] == value
elif op == "!=":
matches = data[field] != value
else:
raise Exception("Invalid comparison operator.Only >, <, >=, <=, ==, != allowed.")
data = data[matches].reset_index(drop = True)
return data
def survival_stats(data, outcomes, key, filters = []):
if key not in data.columns.values :
print("'{}' is not a feature of the Titanic data.Did you spell something wrong?".format(key))
return False
if(key == 'Cabin' or key == 'PassengerId' or key == 'Ticket'):
print("'{}' has too many unique categories to display! Try a different feature.".format(key))
return False
all_data = pd.concat([data, outcomes.to_frame()], axis=1)
for condition in filters:
all_data = filter_data(all_data, condition)
all_data = all_data[[key, 'Survived']]
plt.figure(figsize=(8,6))
if(key == 'Age' or key == 'Fare'):
all_data = all_data[~np.isnan(all_data[key])]
min_value = all_data[key].min()
max_value = all_data[key].max()
value_range = max_value - min_value
if(key == 'Fare'):
bins = np.arange(0, all_data['Fare'].max() + 20, 20)
if(key == 'Age'):
bins = np.arange(0, all_data['Age'].max() + 10, 10)
nonsurv_vals = all_data[all_data['Survived'] == 0][key].reset_index(drop = True)
surv_vals = all_data[all_data['Survived'] == 1][key].reset_index(drop = True)
plt.hist(nonsurv_vals, bins = bins, alpha = 0.6,
color = 'red', label = 'Did not survive')
plt.hist(surv_vals, bins = bins, alpha = 0.6,
color = 'green', label = 'Survived')
plt.xlim(0, bins.max())
plt.legend(framealpha = 0.8)
else:
if(key == 'Pclass'):
values = np.arange(1,4)
if(key == 'Parch' or key == 'SibSp'):
values = np.arange(0,np.max(data[key])+ 1)
if(key == 'Embarked'):
values = ['C', 'Q', 'S']
if(key == 'Sex'):
values = ['male', 'female']
frame = pd.DataFrame(index=np.arange(len(values)), columns=(key,'Survived','NSurvived'))
for i, value in enumerate(values):
frame.loc[i] = [value, \
len(all_data[(all_data['Survived'] == 1)&(all_data[key] == value)]), \
len(all_data[(all_data['Survived'] == 0)&(all_data[key] == value)])]
bar_width = 0.4
for i in np.arange(len(frame)):
nonsurv_bar = plt.bar(i-bar_width, frame.loc[i]['NSurvived'], width = bar_width, color = 'r')
surv_bar = plt.bar(i, frame.loc[i]['Survived'], width = bar_width, color = 'g')
plt.xticks(np.arange(len(frame)) , values)
plt.legend(( nonsurv_bar[0], surv_bar[0]),('Did not survive', 'Survived'), framealpha = 0.8)
plt.xlabel(key)
plt.ylabel('Number of Passengers')
plt.title("Passenger Survival Statistics With '%s' Feature" % (key))
plt.show()
if sum(pd.isnull(all_data[key])) :
nan_outcomes = all_data[pd.isnull(all_data[key])]['Survived']
print("Passengers with missing '{}' values: {}({} survived, {} did not survive)".format(\
key, len(nan_outcomes), sum(nan_outcomes == 1), sum(nan_outcomes == 0)) ) | Titanic - Machine Learning from Disaster |
3,048,663 | model_2.save("model_2_duo.h5")
<choose_model_class> | survival_stats(data, outcome, 'Sex' ) | Titanic - Machine Learning from Disaster |
3,048,663 | model_3 = keras.models.Sequential()
model_3.add(Dense(32, input_dim=len(list_feat), activation="elu", kernel_initializer="he_normal"))
model_3.add(Dense(64, activation="elu", kernel_initializer="he_normal"))
model_3.add(Dense(128, activation="elu", kernel_initializer="he_normal"))
model_3.add(keras.layers.Dropout(0.25))
model_3.add(Dense(256, activation="elu", kernel_initializer="he_normal"))
model_3.add(Dense(256, activation="elu", kernel_initializer="he_normal"))
model_3.add(keras.layers.Dropout(0.35))
model_3.add(Dense(128, activation="elu", kernel_initializer="he_normal"))
model_3.add(Dense(64, activation="elu", kernel_initializer="he_normal"))
model_3.add(Dense(32, activation="elu", kernel_initializer="he_normal"))
model_3.add(keras.layers.Dropout(0.25))
model_3.add(Dense(1, activation="sigmoid"))
model_3.compile(optimizer="RMSprop", loss='MAE', metrics=["MAE"] )<prepare_x_and_y> | survival_stats(data, outcome, 'Age', ["Sex == 'female'"] ) | Titanic - Machine Learning from Disaster |
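Before the long fits below, a one-line sanity check (a sketch, assuming the standard Keras API) confirms the 32-64-128-256-256-128-64-32 funnel and the single sigmoid output that keeps winPlacePerc predictions inside [0, 1]:

model_3.summary()  # prints layer output shapes and trainable parameter counts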
3,048,663 | x_train = train.loc[train.matchType_1 == "squad", list_feat]
y_train = train.loc[train.matchType_1 == "squad", ["winPlacePerc"]]<train_model> | survival_stats(data, outcome, "Age",["Sex == 'male'","Embarked == 'C'"] ) | Titanic - Machine Learning from Disaster |
3,048,663 | model_3.fit(x=x_train, y=y_train, epochs=60, batch_size=10000, validation_split=0.2, shuffle=True)
model_3.fit(x=x_train, y=y_train, epochs=50, batch_size=3000, validation_split=0.2, shuffle=True )<train_model> | survival_stats(data, outcome, "Age",["Sex == 'male'","Embarked == 'S'"] ) | Titanic - Machine Learning from Disaster |
3,048,663 |
<save_model> | def predict(data):
predictions = []
for _, passenger in data.iterrows() :
if passenger['Sex'] == 'female':
if passenger['Embarked']== 'C' and passenger['Pclass'] <=3 :predictions.append(1)
elif passenger['Embarked']== 'S' and passenger['Pclass'] <3:predictions.append(1)
else:
if(passenger['SibSp'] <2)and(passenger['Parch']<2):
predictions.append(1)
else:
predictions.append(0)
elif passenger['Sex']=='male' and passenger['Age']<15:
if passenger['SibSp'] < 3 and passenger['Embarked']=='S':predictions.append(1)
elif passenger['Embarked']=='C':predictions.append(1)
else:predictions.append(0)
else:predictions.append(0)
return pd.Series(predictions ) | Titanic - Machine Learning from Disaster |
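A single-row trace of the hand-written rule set (a sketch; the passenger below is invented): a female passenger embarked at 'C' satisfies the first branch and is predicted to survive.

sample = pd.DataFrame([{'Sex': 'female', 'Embarked': 'C', 'Pclass': 3,
                        'SibSp': 0, 'Parch': 0, 'Age': 30}])  # hypothetical passenger
print(predict(sample))  # Series with a single 1: predicted survived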
3,048,663 | model_3.save("model_3_squad.h5")
<choose_model_class> | pred = predict(data ) | Titanic - Machine Learning from Disaster |
3,048,663 | model_4 = keras.models.Sequential()
model_4.add(Dense(32, input_dim=len(list_feat), activation="elu", kernel_initializer="he_normal"))
model_4.add(Dense(64, activation="elu", kernel_initializer="he_normal"))
model_4.add(Dense(128, activation="elu", kernel_initializer="he_normal"))
model_4.add(keras.layers.Dropout(0.25))
model_4.add(Dense(128, activation="elu", kernel_initializer="he_normal"))
model_4.add(Dense(64, activation="elu", kernel_initializer="he_normal"))
model_4.add(Dense(32, activation="elu", kernel_initializer="he_normal"))
model_4.add(keras.layers.Dropout(0.25))
model_4.add(Dense(1, activation="sigmoid"))
model_4.compile(optimizer="RMSprop", loss='MAE', metrics=["MAE"] )<prepare_x_and_y> | print(accuracy_score(outcome,pred)) | Titanic - Machine Learning from Disaster |
3,048,663 | x_train = train.loc[train.matchType_1 == "etc", list_feat]
y_train = train.loc[train.matchType_1 == "etc", ["winPlacePerc"]]<train_model> | predtst = predict(datst ) | Titanic - Machine Learning from Disaster |
3,048,663 | model_4.fit(x=x_train, y=y_train, epochs=70, batch_size=10000, validation_split=0.2, shuffle=True)
model_4.fit(x=x_train, y=y_train, epochs=150, batch_size=1000, validation_split=0.2, shuffle=True )<define_variables> | Past = datst.iloc[:,0] | Titanic - Machine Learning from Disaster |
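The two-stage fit above hand-tunes the schedule (coarse 10000-sample batches, then finer 1000-sample batches); a common alternative, sketched here rather than taken from the kernel, is a single fit with an EarlyStopping callback:

early = keras.callbacks.EarlyStopping(monitor="val_loss", patience=10,
                                      restore_best_weights=True)
model_4.fit(x=x_train, y=y_train, epochs=200, batch_size=2000,
            validation_split=0.2, shuffle=True, callbacks=[early])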
3,048,663 |
<save_model> | dst = []
i=0
while i<len(predtst):
dst.append(( Past[i],predtst[i]))
i+=1 | Titanic - Machine Learning from Disaster |
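The while loop above relies on datst keeping its default RangeIndex, so Past[i] is effectively positional; an equivalent one-liner over the same variables is:

dst = list(zip(Past, predtst))  # same (PassengerId, prediction) pairs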
3,048,663 | model_4.save("model_4_etc.h5")
<drop_column> | d = pd.DataFrame(dst,columns=['PassengerId','Survived'] ) | Titanic - Machine Learning from Disaster |
3,048,663 | <load_from_csv><EOS> | d.to_csv('gender_submission.csv',index=False ) | Titanic - Machine Learning from Disaster |
1,316,433 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<count_missing_values> | %matplotlib inline
py.init_notebook_mode(connected=True)
warnings.filterwarnings('ignore')
from sklearn.ensemble import (GradientBoostingClassifier, ExtraTreesClassifier)
print(os.listdir("../input"))
| Titanic - Machine Learning from Disaster |
1,316,433 | print("Check The NA value in test data")
for i in list(test.columns[test.dtypes != "O"]):
print(i, ":", sum(test[i].isna()))<count_unique_values> | train = pd.read_csv('.. /input/train.csv')
test = pd.read_csv('.. /input/test.csv' ) | Titanic - Machine Learning from Disaster |
1,316,433 | len(pd.unique(test.matchId)) , sum(test.groupby("matchId" ).size() < 9 )<merge> | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv') | Titanic - Machine Learning from Disaster |
1,316,433 | temp = pd.DataFrame(test.groupby("matchId" ).size() , columns=["player"])
temp.reset_index(level=0, inplace=True)
test = test.merge(temp, left_on="matchId", right_on="matchId" )<feature_engineering> | train.columns[train.isnull().any() ].tolist() | Titanic - Machine Learning from Disaster |
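The groupby/merge above attaches a per-match player count to every row; pandas can do the same in one step with transform (a sketch using the same column names):

test["player"] = test.groupby("matchId")["matchId"].transform("size")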
1,316,433 | test["matchType_1"] = "-"
test.loc[(test.matchType == "solo-fpp")|
(test.matchType == "solo")|
(test.matchType == "normal-solo-fpp")|
(test.matchType == "normal-solo"), "matchType_1"] = "solo"
test.loc[(test.matchType == "duo-fpp")|
(test.matchType == "duo")|
(test.matchType == "normal-duo-fpp")|
(test.matchType == "normal-duo"), "matchType_1"] = "duo"
test.loc[(test.matchType == "squad-fpp")|
(test.matchType == "squad")|
(test.matchType == "normal-squad-fpp")|
(test.matchType == "normal-squad"), "matchType_1"] = "squad"
test.loc[(test.matchType == "flarefpp")|
(test.matchType == "flaretpp")|
(test.matchType == "crashfpp")|
(test.matchType == "crashtpp"), "matchType_1"] = "etc"<feature_engineering> | print(train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False ).mean() ) | Titanic - Machine Learning from Disaster |