kernel_id (int64) | prompt (string, 8 to 1.85M chars) | completion (string, 1 to 182k chars) | comp_name (string, 5 to 57 chars) |
|---|---|---|---|
14,055,799 | numb=d_train.groupby(['AWKSTAT'])['INCCAT'].agg(Nr='sum', Tot='count')
numb['Ratio']=numb['Nr']/numb['Tot']
numb=numb.sort_values(numb.columns[2])
numb=numb.reset_index()
le_AWKSTAT = LabelEncoder()
le_AWKSTAT.classes_=numb.iloc[:, 0]
d_train.loc[:, 'AWKSTAT'] = le_AWKSTAT.transform(d_train.loc[:, 'AWKSTAT'])
d_test.loc[:, 'AWKSTAT'] = le_AWKSTAT.transform(d_test.loc[:, 'AWKSTAT'])<feature_engineering> | dfl = pd.DataFrame()
good_columns = ['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7']
dfl[good_columns] = temp[good_columns]
dfh = dfl.copy()
dfl_enc = dfl.apply(LabelEncoder().fit_transform)
dfl_enc.head() | Titanic - Machine Learning from Disaster |
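The prompt above orders categories by their target ratio and then overwrites `LabelEncoder.classes_` to freeze that ordering. A minimal sketch of the same idea using an explicit mapping instead (the helper name and signature here are illustrative, not part of the original kernel):

```python
import pandas as pd

def ratio_ordered_codes(train, test, col, target):
    # Rank each category of `col` by its mean target rate, then assign integer
    # codes in that order; the same mapping is reused on the test frame.
    stats = train.groupby(col)[target].agg(Nr='sum', Tot='count')
    stats['Ratio'] = stats['Nr'] / stats['Tot']
    mapping = {cat: code for code, cat in enumerate(stats.sort_values('Ratio').index)}
    train[col] = train[col].map(mapping)
    test[col] = test[col].map(mapping)  # unseen categories become NaN instead of raising
    return mapping
```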
14,055,799 | numb=d_train.groupby(['CAPGAIN'])['INCCAT'].agg(Nr='sum', Tot='count')
numb['Ratio']=numb['Nr']/numb['Tot']
numb=numb.reset_index()
numb=numb.sort_values(numb.columns[3])
d_train.loc[d_train['CAPGAIN']<=3000,'CAPGAIN']=0
d_test.loc[d_test['CAPGAIN']<=3000,'CAPGAIN']=0
d_train.loc[(d_train['CAPGAIN']>3000)&(d_train['CAPGAIN']<10000),'CAPGAIN']=1
d_test.loc[(d_test['CAPGAIN']>3000)&(d_test['CAPGAIN']<10000),'CAPGAIN']=1
d_train.loc[d_train['CAPGAIN']>=10000,'CAPGAIN']=2
d_test.loc[d_test['CAPGAIN']>=10000,'CAPGAIN']=2
<feature_engineering> | one_hot_cols = dfh.columns.tolist()
dfh_enc = pd.get_dummies(dfh, columns=one_hot_cols)
dfh_enc.head() | Titanic - Machine Learning from Disaster |
14,055,799 | numb=d_train.groupby(['DIVVAL'])['INCCAT'].agg(Nr='sum', Tot='count')
numb['Ratio']=numb['Nr']/numb['Tot']
numb=numb.reset_index()
numb=numb.sort_values(numb.columns[0])
d_train['DIVCAT']=0
d_test['DIVCAT']=0
d_train.loc[d_train['DIVVAL']<700,'DIVCAT']=0
d_test.loc[d_test['DIVVAL']<700,'DIVCAT']=0
d_train.loc[(d_train['DIVVAL']>=700)&(d_train['DIVVAL']<=11000),'DIVCAT']=1
d_test.loc[(d_test['DIVVAL']>=700)&(d_test['DIVVAL']<=11000),'DIVCAT']=1
d_train.loc[(d_train['DIVVAL']>11000)&(d_train['DIVVAL']<=28000),'DIVCAT']=2
d_test.loc[(d_test['DIVVAL']>11000)&(d_test['DIVVAL']<=28000),'DIVCAT']=2
d_train.loc[d_train['DIVVAL']>28000,'DIVCAT']=3
d_test.loc[d_test['DIVVAL']>28000,'DIVCAT']=3
d_train=d_train.drop(['DIVVAL'],axis=1)
d_test=d_test.drop(['DIVVAL'],axis=1)<categorify> | train = dfh_enc[:ntrain]
test = dfh_enc[ntrain:] | Titanic - Machine Learning from Disaster |
14,055,799 | numb=d_train.groupby(['FILESTAT'])['INCCAT'].agg(Nr='sum', Tot='count')
numb['Ratio']=numb['Nr']/numb['Tot']
numb=numb.sort_values(numb.columns[2])
numb=numb.reset_index()
le_FILESTAT = LabelEncoder()
le_FILESTAT.classes_=numb.iloc[:, 0]
d_train.loc[:, 'FILESTAT'] = le_FILESTAT.transform(d_train.loc[:, 'FILESTAT'])
d_test.loc[:, 'FILESTAT'] = le_FILESTAT.transform(d_test.loc[:, 'FILESTAT'])
<categorify> | X_test = test
X_train = train | Titanic - Machine Learning from Disaster |
14,055,799 | numb=d_train.groupby(['GRINREG'])['INCCAT'].agg(Nr='sum', Tot='count')
numb['Ratio']=numb['Nr']/numb['Tot']
numb=numb.sort_values(numb.columns[2])
numb=numb.reset_index()
numb
d_train.loc[d_train['GRINREG']=='Not in universe','GRINREG']=1
d_test.loc[d_test['GRINREG']=='Not in universe','GRINREG']=1
d_train.loc[d_train['GRINREG']!=1,'GRINREG']=0
d_test.loc[d_test['GRINREG']!=1,'GRINREG']=0
d_train=d_train.drop(['GRINST'],axis=1)
d_test=d_test.drop(['GRINST'],axis=1)
<feature_engineering> | scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test) | Titanic - Machine Learning from Disaster |
14,055,799 | d_train.loc[d_train['HHDREL'].str.contains('Spouse of householder'),'HHDFMX']='Spouse of householder'
d_test.loc[d_test['HHDREL'].str.contains('Spouse of householder'),'HHDFMX']='Spouse of householder'
d_train.loc[(d_train['HHDREL'].str.contains('Householder')) &
(d_train['HHDFMX'].str.contains('In group quarters')) ,'HHDFMX']='Nonfamily householder'
d_test.loc[(d_test['HHDREL'].str.contains('Householder')) &
(d_test['HHDFMX'].str.contains('In group quarters')) ,'HHDFMX']='Nonfamily householder'
d_train.loc[d_train['HHDFMX'].str.contains('In group quarters'),'HHDFMX']='Other relative of householder'
d_test.loc[d_test['HHDFMX'].str.contains('In group quarters'),'HHDFMX']='Other relative of householder'
d_train.loc[d_train['HHDFMX'].str.contains('Other relative of householder'),'HHDREL']='Other relative of householder'
d_test.loc[d_test['HHDFMX'].str.contains('Other relative of householder'),'HHDREL']='Other relative of householder'
d_train.loc[d_train['HHDFMX'].str.contains('Grandchild'),'HHDFMX']='Child 18+'
d_test.loc[d_test['HHDFMX'].str.contains('Grandchild'),'HHDFMX']='Child 18+'
d_train.loc[d_train['HHDFMX'].str.contains('Child'),'HHDREL']='Child 18+'
d_test.loc[d_test['HHDFMX'].str.contains('Child'),'HHDREL']='Child 18+'
d_train.loc[d_train['HHDFMX'].str.contains('Child'),'HHDFMX']='Child 18+'
d_test.loc[d_test['HHDFMX'].str.contains('Child'),'HHDFMX']='Child 18+'
numb=d_train.groupby(['HHDFMX'])['INCCAT'].agg(Nr='sum', Tot='count')
numb['Ratio']=numb['Nr']/numb['Tot']
numb=numb.reset_index()
numb=numb.sort_values(numb.columns[0])
le_HHDFMX = LabelEncoder()
le_HHDFMX.classes_=numb.iloc[:, 0]
d_train.loc[:, 'HHDFMX'] = le_HHDFMX.transform(d_train.loc[:, 'HHDFMX'])
d_test.loc[:, 'HHDFMX'] = le_HHDFMX.transform(d_test.loc[:, 'HHDFMX'])
numb=d_train.groupby(['HHDREL'])['INCCAT'].agg(Nr='sum', Tot='count')
numb['Ratio']=numb['Nr']/numb['Tot']
numb=numb.reset_index()
numb=numb.sort_values(numb.columns[0])
numb
le_HHDREL = LabelEncoder()
le_HHDREL.classes_=numb.iloc[:, 0]
d_train.loc[:, 'HHDREL'] = le_HHDREL.transform(d_train.loc[:, 'HHDREL'])
d_test.loc[:, 'HHDREL'] = le_HHDREL.transform(d_test.loc[:, 'HHDREL'])
<categorify> | ran = RandomForestClassifier(random_state=1)
knn = KNeighborsClassifier()
log = LogisticRegression()
xgb = XGBClassifier()
gbc = GradientBoostingClassifier()
svc = SVC(probability=True)
ext = ExtraTreesClassifier()
ada = AdaBoostClassifier()
gnb = GaussianNB()
gpc = GaussianProcessClassifier()
bag = BaggingClassifier()
models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag]
model_names = ['Random Forest', 'K Nearest Neighbor', 'Logistic Regression', 'XGBoost', 'Gradient Boosting', 'SVC', 'Extra Trees', 'AdaBoost', 'Gaussian Naive Bayes', 'Gaussian Process', 'Bagging Classifier']
scores = {}
for ind, mod in enumerate(models):
    mod.fit(X_train, y_train)
    acc = cross_val_score(mod, X_train, y_train, scoring="accuracy", cv=10)
    scores[model_names[ind]] = acc | Titanic - Machine Learning from Disaster |
14,055,799 | pd.set_option('display.max_rows', 20)
numb=d_train.groupby(['MIGMTR3'])['INCCAT'].agg(Nr='sum', Tot='count')
numb['Ratio']=numb['Nr']/numb['Tot']
numb=numb.sort_values(numb.columns[2])
numb=numb.reset_index()
numb
le_MIGMTR3 = LabelEncoder()
le_MIGMTR3.classes_=numb.iloc[:, 0]
d_train.loc[:, 'MIGMTR3'] = le_MIGMTR3.transform(d_train.loc[:, 'MIGMTR3'])
d_test.loc[:, 'MIGMTR3'] = le_MIGMTR3.transform(d_test.loc[:, 'MIGMTR3'])
d_train=d_train.drop(['MIGMTR1','MIGMTR4'],axis=1)
d_test=d_test.drop(['MIGMTR1','MIGMTR4'],axis=1)
<categorify> | results = pd.DataFrame(scores).T
results['mean'] = results.mean(1)
result_df = results.sort_values(by='mean', ascending=False)
result_df.head(11) | Titanic - Machine Learning from Disaster |
14,055,799 | numb=d_train.groupby(['MIGSAME'])['INCCAT'].agg(Nr='sum', Tot='count')
numb['Ratio']=numb['Nr']/numb['Tot']
numb=numb.sort_values(numb.columns[2])
numb=numb.reset_index()
numb
le_MIGSAME = LabelEncoder()
le_MIGSAME.classes_=numb.iloc[:, 0]
d_train.loc[:, 'MIGSAME'] = le_MIGSAME.transform(d_train.loc[:, 'MIGSAME'])
d_test.loc[:, 'MIGSAME'] = le_MIGSAME.transform(d_test.loc[:, 'MIGSAME'])<categorify> | gbc_imp = pd.DataFrame({'Feature':train.columns, 'gbc importance':gbc.feature_importances_})
xgb_imp = pd.DataFrame({'Feature':train.columns, 'xgb importance':xgb.feature_importances_})
ran_imp = pd.DataFrame({'Feature':train.columns, 'ran importance':ran.feature_importances_})
ext_imp = pd.DataFrame({'Feature':train.columns, 'ext importance':ext.feature_importances_})
ada_imp = pd.DataFrame({'Feature':train.columns, 'ada importance':ada.feature_importances_})
importances = gbc_imp.merge(xgb_imp, on='Feature').merge(ran_imp, on='Feature').merge(ext_imp, on='Feature').merge(ada_imp, on='Feature')
importances['Average'] = importances.mean(axis=1, numeric_only=True)
importances = importances.sort_values(by='Average', ascending=False).reset_index(drop=True) | Titanic - Machine Learning from Disaster |
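One caveat with the averaging step above: the five importance columns live on different scales (impurity-based vs. XGBoost gain), so the raw mean can be dominated by a single model. A hedged alternative, assuming the `importances` frame built above, is to average ranks instead of values:

```python
# Rank-average the per-model importances (columns ending in 'importance');
# ranks are scale-free, and a lower average rank means more important overall.
imp_cols = [c for c in importances.columns if c.endswith('importance')]
importances['AvgRank'] = importances[imp_cols].rank(ascending=False).mean(axis=1)
importances.sort_values('AvgRank').head(10)
```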
14,055,799 | numb=d_train.groupby(['MIGSUN'])['INCCAT'].agg(Nr='sum', Tot='count')
numb['Ratio']=numb['Nr']/numb['Tot']
numb=numb.sort_values(numb.columns[2])
numb=numb.reset_index()
numb
le_MIGSUN = LabelEncoder()
le_MIGSUN.classes_=numb.iloc[:, 0]
d_train.loc[:, 'MIGSUN'] = le_MIGSUN.transform(d_train.loc[:, 'MIGSUN'])
d_test.loc[:, 'MIGSUN'] = le_MIGSUN.transform(d_test.loc[:, 'MIGSUN'])<drop_column> | mylist = list(importance1.index) | Titanic - Machine Learning from Disaster |
14,055,799 | d_train=d_train.drop(['PARENT'],axis=1)
d_test=d_test.drop(['PARENT'],axis=1)
<drop_column> | train1 = pd.DataFrame()
test1 = pd.DataFrame()
for i in mylist:
    train1[i] = train[i]
    test1[i] = test[i]
train1.head() | Titanic - Machine Learning from Disaster |
14,055,799 | d_train=d_train.drop(['PEMNTVTY','PENATVTY'],axis=1)
d_test=d_test.drop(['PEMNTVTY','PENATVTY'],axis=1)<categorify> | train = train1
test = test1
X_train = train
X_test = test
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test) | Titanic - Machine Learning from Disaster |
14,055,799 | numb=d_train.groupby(['PEFNTVTY'])['INCCAT'].agg(Nr='sum', Tot='count')
numb['Ratio']=numb['Nr']/numb['Tot']
numb=numb.sort_values(numb.columns[2])
numb=numb.reset_index()
numb
le_PEFNTVTY = LabelEncoder()
le_PEFNTVTY.classes_=numb.iloc[:, 0]
d_train.loc[:, 'PEFNTVTY'] = le_PEFNTVTY.transform(d_train.loc[:, 'PEFNTVTY'])
d_test.loc[:, 'PEFNTVTY'] = le_PEFNTVTY.transform(d_test.loc[:, 'PEFNTVTY'])<categorify> | ran = RandomForestClassifier(random_state=1)
knn = KNeighborsClassifier()
log = LogisticRegression()
xgb = XGBClassifier(random_state=1)
gbc = GradientBoostingClassifier(random_state=1)
svc = SVC(probability=True)
ext = ExtraTreesClassifier(random_state=1)
ada = AdaBoostClassifier(random_state=1)
gnb = GaussianNB()
gpc = GaussianProcessClassifier()
bag = BaggingClassifier(random_state=1)
models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag]
model_names = ['Random Forest', 'K Nearest Neighbor', 'Logistic Regression', 'XGBoost', 'Gradient Boosting', 'SVC', 'Extra Trees', 'AdaBoost', 'Gaussian Naive Bayes', 'Gaussian Process', 'Bagging Classifier']
scores2 = {}
for ind, mod in enumerate(models):
    mod.fit(X_train, y_train)
    acc = cross_val_score(mod, X_train, y_train, scoring="accuracy", cv=10)
    scores2[model_names[ind]] = acc | Titanic - Machine Learning from Disaster |
14,055,799 | numb=d_train.groupby(['PRCITSHP'])['INCCAT'].agg(Nr='sum', Tot='count')
numb['Ratio']=numb['Nr']/numb['Tot']
numb=numb.sort_values(numb.columns[2])
numb=numb.reset_index()
numb
le_PRCITSHP = LabelEncoder()
le_PRCITSHP.classes_=numb.iloc[:, 0]
d_train.loc[:, 'PRCITSHP'] = le_PRCITSHP.transform(d_train.loc[:, 'PRCITSHP'])
d_test.loc[:, 'PRCITSHP'] = le_PRCITSHP.transform(d_test.loc[:, 'PRCITSHP'])<categorify> | results = pd.DataFrame(scores2).T
results['mean'] = results.mean(1)
result_df = results.sort_values(by='mean', ascending=False)
result_df.head(11) | Titanic - Machine Learning from Disaster |
14,055,799 | numb=d_train.groupby(['VETQVA','VETYN'])['INCCAT'].agg(Nr='sum', Tot='count')
numb['Ratio']=numb['Nr']/numb['Tot']
numb=numb.sort_values(numb.columns[2])
numb=numb.reset_index()
numb
le_VET = LabelEncoder()
le_VET.classes_=numb.iloc[:, 0].astype(str)+","+ numb.iloc[:, 1].astype(str)
d_train['VET'] = le_VET.transform(d_train.loc[:, 'VETQVA'].astype(str)+ "," + d_train.loc[:,'VETYN'].astype(str))
d_test['VET'] = le_VET.transform(d_test.loc[:, 'VETQVA'].astype(str)+ "," + d_test.loc[:,'VETYN'].astype(str))
d_train=d_train.drop(['VETYN','VETQVA'],axis=1)
d_test=d_test.drop(['VETYN','VETQVA'],axis=1)
<create_dataframe> | Cs = [0.01, 0.1, 1, 5, 10, 15, 20, 50]
gammas = [0.001, 0.01, 0.1]
hyperparams = {'C': Cs, 'gamma': gammas}
gd = GridSearchCV(estimator=SVC(probability=True), param_grid=hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
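Since GridSearchCV refits the best configuration on the full training data by default (refit=True), the tuned model can be used directly; a short usage sketch with the `gd` object and the `X_test` features from the cells above:

```python
# gd.best_estimator_ is the SVC retrained with gd.best_params_ on all of X_train.
best_svc = gd.best_estimator_
probs = best_svc.predict_proba(X_test)[:, 1]  # probability=True was set above
```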
14,055,799 | d_train_bckp=d_train.copy()
d_test_bckp=d_test.copy()<remove_duplicates> | learning_rate = [0.01, 0.05, 0.1, 0.2, 0.5]
n_estimators = [100, 1000, 2000]
max_depth = [3, 5, 10, 15]
hyperparams = {'learning_rate': learning_rate, 'n_estimators': n_estimators}
gd = GridSearchCV(estimator=GradientBoostingClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
14,055,799 | d_train=d_train.drop_duplicates()<concatenate> | penalty = ['l1', 'l2']
C = np.logspace(0, 4, 10)
hyperparams = {'penalty': penalty, 'C': C}
gd = GridSearchCV(estimator=LogisticRegression() , param_grid=hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
14,055,799 | add_tr=d_train[(d_train['INCCAT']==1)&(d_train['AAGE']>20)]
balanced_d_train=d_train.copy()
for i in range(0, round(balanced_d_train.shape[0]/add_tr.shape[0])):
    balanced_d_train=pd.concat([balanced_d_train,add_tr],axis=0,sort=False, ignore_index=True)<prepare_x_and_y> | learning_rate = [0.001, 0.005, 0.01, 0.05, 0.1, 0.2]
n_estimators = [10, 50, 100, 250, 500, 1000]
hyperparams = {'learning_rate': learning_rate, 'n_estimators': n_estimators}
gd = GridSearchCV(estimator = XGBClassifier() , param_grid=hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
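The prompt two rows above balances the classes by repeatedly concatenating the minority slice `add_tr` onto the frame. A hypothetical, more direct sketch of the same upsampling with `DataFrame.sample`, assuming INCCAT is the 0/1 target as in the earlier cells:

```python
# Upsample the minority class in one step instead of repeated concatenation.
n_gap = (d_train['INCCAT'] == 0).sum() - (d_train['INCCAT'] == 1).sum()
extra = add_tr.sample(n=max(n_gap, 0), replace=True, random_state=0)
balanced_d_train = pd.concat([d_train, extra], ignore_index=True)
```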
14,055,799 | gen_y_train=d_train['INCCAT'].copy()
balanced_y_train=balanced_d_train['INCCAT'].copy()<drop_column> | max_depth = [3, 4, 5, 6, 7, 8, 9, 10]
min_child_weight = [1, 2, 3, 4, 5, 6]
hyperparams = {'max_depth': max_depth, 'min_child_weight': min_child_weight}
gd = GridSearchCV(estimator=XGBClassifier(learning_rate=0.2, n_estimators=50),
param_grid=hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
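The XGBoost cells in this kernel tune hyperparameters in stages (learning rate and tree count first, then tree shape, then gamma, sampling, and regularization), carrying each stage's winners forward. A small helper sketching that pattern, under the same assumptions as the cells above (X_train and y_train in scope; the helper itself is illustrative):

```python
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier

def tune_stage(fixed_params, grid, X, y, cv=5):
    # Freeze the parameters already decided; search only the current stage's grid.
    gd = GridSearchCV(XGBClassifier(**fixed_params), grid, cv=cv,
                      scoring='accuracy', n_jobs=-1)
    gd.fit(X, y)
    return {**fixed_params, **gd.best_params_}, gd.best_score_

params = {'learning_rate': 0.2, 'n_estimators': 50}
params, score = tune_stage(params, {'max_depth': [3, 5, 8],
                                    'min_child_weight': [1, 2, 4]}, X_train, y_train)
```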
14,055,799 | d_train = d_train.drop('INCCAT', axis=1)
d_test = d_test.drop('INCCAT', axis=1)
balanced_d_train=balanced_d_train.drop('INCCAT', axis=1 )<prepare_x_and_y> | gamma = [i*0.1 for i in range(0,5)]
hyperparams = {'gamma': gamma}
gd = GridSearchCV(estimator= XGBClassifier(learning_rate=0.2, n_estimators=50, max_depth=8, min_child_weight=2),
param_grid=hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
14,055,799 | X=balanced_d_train.copy()
y=balanced_y_train.copy()<normalization> | subsample = [0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1]
colsample_bytree = [0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1]
hyperparams = {'subsample': subsample, 'colsample_bytree': colsample_bytree}
gd = GridSearchCV(estimator=XGBClassifier(learning_rate=0.2, n_estimators=50, max_depth=8, min_child_weight=2, gamma=0),
param_grid=hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
14,055,799 | train_norm = X
std_scale = preprocessing.StandardScaler().fit(train_norm)
x_train_norm = std_scale.transform(train_norm)
training_norm_col = pd.DataFrame(x_train_norm, index=train_norm.index, columns=train_norm.columns)
X.update(training_norm_col )<choose_model_class> | reg_alpha = [1e-5, 1e-2, 0.1, 1, 100]
hyperparams = {'reg_alpha': reg_alpha}
gd = GridSearchCV(estimator=XGBClassifier(learning_rate=0.2, n_estimators=50, max_depth=8, min_child_weight=2, gamma=0, subsample=0.7, colsample_bytree=0.65),
param_grid=hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
14,055,799 | model = Sequential()
model.add(Dense(90, input_dim=X.shape[1], activation='relu'))
model.add(Dropout(rate=0.25, name='dropout_one'))
model.add(Dense(20, activation='sigmoid'))
model.add(Dense(7, activation='sigmoid'))
model.add(Dropout(rate=0.1, name='dropout_two'))
model.add(Dense(1, activation='sigmoid'))
<choose_model_class> | n_restarts_optimizer = [0, 1, 2, 3]
max_iter_predict = [1, 2, 5, 10, 20, 35, 50, 100]
warm_start = [True, False]
hyperparams = {'n_restarts_optimizer': n_restarts_optimizer, 'max_iter_predict': max_iter_predict, 'warm_start': warm_start}
gd = GridSearchCV(estimator=GaussianProcessClassifier() , param_grid=hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
14,055,799 | opt=keras.optimizers.RMSprop(learning_rate=0.003, rho=0.9)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['binary_accuracy'])
<train_model> | n_estimators = [10, 100, 200, 500]
learning_rate = [0.001, 0.01, 0.1, 0.5, 1, 1.5, 2]
hyperparams = {'n_estimators': n_estimators, 'learning_rate': learning_rate}
gd = GridSearchCV(estimator=AdaBoostClassifier() , param_grid=hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
14,055,799 | np.random.seed(10)
kfold = StratifiedKFold(n_splits=10, shuffle=True)
cvscores = []
kfold.get_n_splits(X,y)
i=1
for train_index, test_index in kfold.split(X, y):
    print("split Nr %d" % i)
    X_tr_m1, X_te_m1 = X.iloc[train_index], X.iloc[test_index]
    y_tr_m1, y_te_m1 = y.iloc[train_index], y.iloc[test_index]
    model.fit(X_tr_m1, y_tr_m1, epochs=3, batch_size=6000, verbose=1)
    scores = model.evaluate(X_te_m1, y_te_m1, verbose=0)
    print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
    cvscores.append(scores[1] * 100)
    i = i + 1
print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))<train_model> | n_neighbors = [1, 2, 3, 4, 5]
algorithm = ['auto']
weights = ['uniform', 'distance']
leaf_size = [1, 2, 3, 4, 5, 10]
hyperparams = {'algorithm':algorithm, 'weights': weights, 'leaf_size': leaf_size, 'n_neighbors': n_neighbors}
gd=GridSearchCV(estimator=KNeighborsClassifier() , param_grid=hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
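One caveat in the StratifiedKFold prompt above: a single compiled `model` is fitted in every fold, so weights trained on fold k carry into fold k+1 and the CV estimate is optimistic. A hedged sketch that rebuilds the network per fold, reusing the same architecture as the Sequential model defined earlier (Keras imports assumed in scope):

```python
def build_model(n_features):
    # Fresh, untrained copy of the architecture used above.
    m = Sequential()
    m.add(Dense(90, input_dim=n_features, activation='relu'))
    m.add(Dropout(rate=0.25))
    m.add(Dense(20, activation='sigmoid'))
    m.add(Dense(7, activation='sigmoid'))
    m.add(Dropout(rate=0.1))
    m.add(Dense(1, activation='sigmoid'))
    m.compile(loss='binary_crossentropy', optimizer='rmsprop',
              metrics=['binary_accuracy'])
    return m

for train_index, test_index in kfold.split(X, y):
    fold_model = build_model(X.shape[1])  # re-initialized weights each fold
    fold_model.fit(X.iloc[train_index], y.iloc[train_index],
                   epochs=3, batch_size=6000, verbose=0)
```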
14,055,799 | opt = keras.optimizers.Adam(learning_rate=0.03, beta_1=0.9, beta_2=0.999, amsgrad=False)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['binary_accuracy'])
model.fit(X, y, epochs=80, batch_size=8000, verbose=1)<compute_train_metric> | n_estimators = [10, 50, 100, 200]
max_depth = [3, None]
max_features = [0.1, 0.2, 0.5, 0.8]
min_samples_split = [2, 6]
min_samples_leaf = [2, 6]
hyperparams = {'n_estimators': n_estimators, 'max_depth': max_depth, 'max_features': max_features,
'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf}
gd = GridSearchCV(estimator=RandomForestClassifier() , param_grid=hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
14,055,799 | y_T=model.predict(X)
matrix = confusion_matrix(balanced_y_train, y_T.round())
matrix<compute_train_metric> | n_estimators = [10, 25, 50, 75, 100]
max_depth = [3, None]
max_features = [0.1, 0.2, 0.5, 0.8]
min_samples_split = [2, 10]
min_samples_leaf = [2, 10]
hyperparams = {'n_estimators': n_estimators, 'max_depth': max_depth, 'max_features': max_features, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf}
gd = GridSearchCV(estimator=ExtraTreesClassifier() , param_grid=hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
14,055,799 | roc_auc_score(balanced_y_train, y_T.round())<train_model> | n_estimators = [10, 50, 75, 100, 200]
max_samples = [0.1, 0.2, 0.5, 0.8, 1.0]
max_features = [0.1, 0.2, 0.5, 0.8, 1.0]
hyperparams = {'n_estimators': n_estimators, 'max_samples': max_samples, 'max_features': max_features}
gd = GridSearchCV(estimator=BaggingClassifier() , param_grid=hyperparams, verbose=True, cv=5, scoring="accuracy", n_jobs=-1)
gd.fit(X_train, y_train)
print(gd.best_score_)
print(gd.best_params_) | Titanic - Machine Learning from Disaster |
14,055,799 |
<define_variables> | ran = RandomForestClassifier(max_depth=None, max_features=0.1, min_samples_leaf=2, min_samples_split=6, n_estimators=100, random_state=1)
knn = KNeighborsClassifier(leaf_size=1, n_neighbors=5, weights='distance')
log = LogisticRegression(C=1.0, penalty='l2')
xgb = XGBClassifier(learning_rate=0.2, n_estimators=50, max_depth=8, min_child_weight=2, gamma=0, subsample=0.7, colsample_bytree=0.65, reg_alpha=1)
gbc = GradientBoostingClassifier(learning_rate=0.2, max_depth=3, n_estimators=1000)
svc = SVC(probability=True, gamma=0.001, C=20)
ext = ExtraTreesClassifier(max_depth=None, max_features=0.1, min_samples_leaf=2, min_samples_split=2, n_estimators=50, random_state=1)
ada = AdaBoostClassifier(learning_rate=0.5, n_estimators=200, random_state=1)
gpc = GaussianProcessClassifier(max_iter_predict=1, n_restarts_optimizer=0, warm_start=True)
bag = BaggingClassifier(max_features=0.5, max_samples=1.0, n_estimators=75, random_state=1)
models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag]
model_names = ['Random Forest', 'K Nearest Neighbour', 'Logistic Regression', 'XGBoost', 'Gradient Boosting', 'SVC', 'Extra Trees', 'AdaBoost', 'Gaussian Naive Bayes', 'Gaussian Process', 'Bagging Classifier']
scores3 = {}
for ind, mod in enumerate(models):
    mod.fit(X_train, y_train)
    acc = cross_val_score(mod, X_train, y_train, scoring="accuracy", cv=10)
    scores3[model_names[ind]] = acc | Titanic - Machine Learning from Disaster |
14,055,799 |
<prepare_x_and_y> | grid_hard = VotingClassifier(estimators = [('Random Forest', ran),
('Logistic Regression', log),
('XGBoost', xgb),
('Gradient Boosting', gbc),
('Extra Trees', ext),
('AdaBoost', ada),
('Gaussian Process', gpc),
('SVC', svc),
('K Nearest Neighbor', knn),
('Bagging Classifier', bag)], voting='hard')
grid_hard_cv = model_selection.cross_validate(grid_hard, X_train, y_train, cv=10)
grid_hard.fit(X_train, y_train)
print("Hard voting on test set score mean: {:.2f}".format(grid_hard_cv['test_score'].mean() *100)) | Titanic - Machine Learning from Disaster |
14,055,799 | X_T=d_test.copy()
test_norm = X_T
x_test_norm = std_scale.transform(test_norm)
testing_norm_col = pd.DataFrame(x_test_norm, index=test_norm.index, columns=test_norm.columns)
X_T.update(testing_norm_col )<predict_on_test> | grid_soft = VotingClassifier(estimators = [('Random Forest', ran),
('Logistic Regression', log),
('XGBoost', xgb),
('Gradient Boosting', gbc),
('Extra Trees', ext),
('AdaBoost', ada),
('Gaussian Process', gpc),
('SVC', svc),
('K Nearest Neighbor', knn),
('Bagging Classifier', bag)], voting='soft')
grid_soft_cv = model_selection.cross_validate(grid_soft, X_train, y_train, cv=10)
grid_soft.fit(X_train, y_train)
print("Soft voting on test set score mean: {:.2f}".format(grid_soft_cv['test_score'].mean() *100)) | Titanic - Machine Learning from Disaster |
14,055,799 | y_Test_lev1_pred=model.predict(X_T )<feature_engineering> | predictions = grid_hard.predict(X_test)
submission = pd.concat([pd.DataFrame(passId), pd.DataFrame(predictions)], axis='columns')
submission.columns = ["PassengerId", "Survived"]
submission.to_csv('titanic_submission1.csv', header=True, index=False) | Titanic - Machine Learning from Disaster |
14,055,799 | <feature_engineering><EOS> | predictions = grid_soft.predict(X_test)
submission = pd.concat([pd.DataFrame(passId), pd.DataFrame(predictions)], axis='columns')
submission.columns = ["PassengerId", "Survived"]
submission.to_csv('titanic_submission2.csv', header=True, index=False) | Titanic - Machine Learning from Disaster |
14,262,317 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv> | %matplotlib inline
| Titanic - Machine Learning from Disaster |
14,262,317 | sub=pd.read_csv('/kaggle/input/ml-challenge-week6/sampleSubmission.csv')
sub["income class"]=d_test["y_new"]<save_to_csv> | train_df = pd.read_csv(".. /input/titanic/train.csv")
test_df = pd.read_csv(".. /input/titanic/test.csv")
combine = [train_df, test_df] | Titanic - Machine Learning from Disaster |
14,262,317 | filename = 'submission.csv'
sub.to_csv(filename, index=False)<import_modules> | train_df.isnull().sum() | Titanic - Machine Learning from Disaster |
14,262,317 | import os
<import_modules> | print('_'*40)
test_df.isnull().sum() | Titanic - Machine Learning from Disaster |
14,262,317 | import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
<load_from_csv> | train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False) | Titanic - Machine Learning from Disaster |
14,262,317 | df_train = pd.read_csv('../input/svnit-ml-1/train.csv')
df_test = pd.read_csv('../input/testacm1/test.csv')
<prepare_x_and_y> | train_df[["Sex", "Survived"]].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False) | Titanic - Machine Learning from Disaster |
14,262,317 | train_X = df_train.loc[:, 'X1':'X23']
train_y = df_train.loc[:, 'Y']
<load_pretrained> | train_df[["SibSp", "Survived"]].groupby(['SibSp'], as_index=False ).mean().sort_values(by='Survived',
ascending=False ) | Titanic - Machine Learning from Disaster |
14,262,317 | rf = RandomForestClassifier(n_estimators=50, random_state=123)
<train_model> | train_df[["Parch", "Survived"]].groupby(['Parch'], as_index=False ).mean().sort_values(by='Survived',
ascending=False ) | Titanic - Machine Learning from Disaster |
14,262,317 | rf.fit(train_X, train_y)
<predict_on_test> | for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(r'([A-Za-z]+)\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex']) | Titanic - Machine Learning from Disaster |
14,262,317 | df_test = df_test.loc[:, 'X1':'X23']
pred = rf.predict_proba(df_test)
<save_to_csv> | for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean() | Titanic - Machine Learning from Disaster |
14,262,317 | result = pd.DataFrame(pred[:,1])
result.columns = ['Y']
result.index.name = 'ID'
result.index += 1
result.to_csv('output.csv')
<define_variables> | title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
train_df.head() | Titanic - Machine Learning from Disaster |
14,262,317 | seed = 300<import_modules> | train_df = train_df.drop(['Name', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name', 'PassengerId'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape | Titanic - Machine Learning from Disaster |
14,262,317 | from math import *
import math
import numpy as np
import pandas as pd<load_from_csv> | for dataset in combine:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
14,262,317 | Train = pd.read_csv(".. /input/hta-tagging/train.csv")
Train.head(5 )<load_from_csv> | guess_ages = np.zeros(( 2,3))
guess_ages | Titanic - Machine Learning from Disaster |
14,262,317 | Test = pd.read_csv(".. /input/hta-tagging/test.csv")
Test.head(5 )<feature_engineering> | for dataset in combine:
for i in range(0, 2):
for j in range(0, 3):
guess_df = dataset[(dataset['Sex'] == i)&(dataset['Pclass'] == j+1)]['Age'].dropna()
age_guess = guess_df.median()
guess_ages[i,j] = int(age_guess/0.5 + 0.5)* 0.5
for i in range(0, 2):
for j in range(0, 3):
dataset.loc[(dataset.Age.isnull())&(dataset.Sex == i)&(dataset.Pclass == j+1),'Age'] = guess_ages[i,j]
dataset['Age'] = dataset['Age'].astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
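The nested loops above fill missing ages with the median of each Sex/Pclass cell, rounded to the nearest half year. A more compact sketch of the same imputation (without the rounding step) using groupby-transform:

```python
for dataset in combine:
    # Median Age of the matching Sex/Pclass cell, applied only where Age is missing.
    dataset['Age'] = dataset.groupby(['Sex', 'Pclass'])['Age'].transform(
        lambda s: s.fillna(s.median()))
```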
14,262,317 | def ReadFileToDataFrame(df, path):
    for i in range(len(df.Filename)):
        Filename = df.Filename[i]
        CurrentFile = "../input/hta-tagging/{2}/{2}/{0}/{1}".format(Filename.split("-")[0], Filename, path)
        File = open(CurrentFile, "r")
        contents = File.read()
        contents = contents.replace("\r\n", "\n")
        contents = contents.replace("\r", "\n")
        df.Filename[i] = contents
    df.rename(columns={"Filename": "Text"}, inplace=True)
ReadFileToDataFrame(Train, "train-data")
Train.head(5)<feature_engineering> | train_df['AgeBand'] = pd.cut(train_df['Age'], 5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending=True) | Titanic - Machine Learning from Disaster |
14,262,317 | def ReadTestFileToDataFrame(df, path):
    df["Text"] = [""] * len(df.Id)
    for i in range(len(df.Id)):
        Filename = df.Id[i]
        CurrentFile = "../input/hta-tagging/{2}/{2}/{0}/{1}".format(Filename.split("-")[0], Filename, path)
        def Do(File):
            contents = File.read()
            contents = contents.replace("\r\n", "\n")
            contents = contents.replace("\r", "\n")
            df.Text[i] = contents
        try:
            with codecs.open(CurrentFile, encoding='cp1252') as File:
                Do(File)
        except:
            with codecs.open(CurrentFile, encoding='utf-8') as File:
                Do(File)
ReadTestFileToDataFrame(Test, "test-data")
Test.head(5)<categorify> | for dataset in combine:
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[dataset['Age'] > 64, 'Age'] = 4
train_df.head() | Titanic - Machine Learning from Disaster |
14,262,317 | def AnsEncoder(df, col_name):
    for i in range(len(df[col_name])):
        val = 0
        txt = df.loc[i, col_name]
        if txt == "P": val = 2
        if txt == "Q": val = 1
        if txt == "N": val = 0
        df.loc[i, col_name] = val
    df[col_name] = pd.to_numeric(df[col_name])<categorify> | train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head() | Titanic - Machine Learning from Disaster |
14,262,317 | AnsEncoder(Train, "Blinding of intervention");
AnsEncoder(Train, "Blinding of Outcome assessment");
Train.head(5)<feature_engineering> | for dataset in combine:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False) | Titanic - Machine Learning from Disaster |
14,262,317 | def remove_ref(text):
    newtext = ""
    for line in text.split("\n"):
        if line.lower() == "references" or line.lower() == "reference":
            break
        newtext += line + "\n"
    return newtext
Train.Text = Train.Text.apply(lambda x: remove_ref(x))
Test.Text = Test.Text.apply(lambda x: remove_ref(x))<string_transform> | for dataset in combine:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean() | Titanic - Machine Learning from Disaster |
14,262,317 | def FindBeforeColon(text):
    def kwreplace(cutted):
        cutted = cutted.lower()
        if "key" in cutted: cutted = "key"
        elif "objective" in cutted: cutted = "objective"
        elif "reference" in cutted: cutted = "reference"
        elif "method" in cutted: cutted = "method"
        elif cutted.endswith("es"): cutted = cutted[:-2]
        elif cutted.endswith("s"): cutted = cutted[:-1]
        return cutted
    tmptxt = ""
    curtopic = ""
    topic = []
    for line in text.split("\n"):
        if ":" in line:
            cutted = line.split(":")[0]
            if len(cutted) > 0:
                if len(cutted.split(" ")) < 6 and cutted[0] in "ABCDEFGHIJKLMNOPQRSTUVWXYZ" and (not "Author" in cutted):
                    cutted = kwreplace(cutted)
                    if curtopic != "":
                        topic.append((curtopic, tmptxt))
                        tmptxt = ""
                    curtopic = cutted
                    tmptxt = "".join(line.split(":")[1:])
                    continue
        if len(line) > 0:
            if (not " " in line) and line[0] in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
                if curtopic != "":
                    topic.append((curtopic, tmptxt))
                curtopic = kwreplace(line)
                tmptxt = ""
                continue
        tmptxt += "\n" + line
    return topic<categorify> | train_df = train_df.drop(['Parch', 'SibSp'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp'], axis=1)
combine = [train_df, test_df]
train_df.head() | Titanic - Machine Learning from Disaster |
14,262,317 | TO = []
for i, txt in enumerate(Train.Text):
    TO.append((i, FindBeforeColon(txt)))
TestO = []
for i, txt in enumerate(Test.Text):
    TestO.append((i, FindBeforeColon(txt)))<define_variables> | freq_port = train_df.Embarked.dropna().mode()[0]
print(freq_port) | Titanic - Machine Learning from Disaster |
14,262,317 | SelectedCols = ["Text","method","key"]<save_to_csv> | for dataset in combine:
    dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False) | Titanic - Machine Learning from Disaster |
14,262,317 | def WriteToPD(df, arr):
    arrlength = len(arr)
    PlaceholderList = [None] * df.shape[0]
    for idx, optionlist in arr:
        if idx % 100 == 0:
            print("{0}/{1} Completed".format(idx, arrlength))
        for option, text in optionlist:
            if option in SelectedCols:
                if not option in df:
                    df[option] = PlaceholderList
                df.loc[idx, option] = text<save_to_csv> | for dataset in combine:
    dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
14,262,317 | WriteToPD(Train,TO)
Train.head()<save_to_csv> | test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True) | Titanic - Machine Learning from Disaster |
14,262,317 | WriteToPD(Test,TestO)
Test.head()<choose_model_class> | train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True) | Titanic - Machine Learning from Disaster |
14,262,317 | class TFIDFVector:
    def __init__(this, train, val):
        print("- TF-IDF Vector: Initializing")
        this.tfv = TfidfVectorizer(min_df=3, max_features=None,
                                   strip_accents='unicode', analyzer='word', token_pattern=r'\w{1,}',
                                   ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1,
                                   stop_words='english')
        print("- TF-IDF Vector: Preparing")
        if type(train[0]) != str:
            tl2 = []
            for l in train:
                tl2 += [txt for txt in l if txt != None]
            for l in val:
                tl2 += [txt for txt in l if txt != None]
            print("- TF-IDF Vector: Fitting, This might take a while")
            this.tfv.fit(tl2)
            print("- TF-IDF Vector: Fitted")
            del(tl2)
        else:
            print("- TF-IDF Vector: Fitting, This might take a while")
            this.tfv.fit(list(train) + list(val))
            print("- TF-IDF Vector: Fitted")
        print("- TF-IDF Vector: Initialized")
    def CreateVector(this, val, reduceshape=True):
        print("- TF-IDF Vector: Creating Vector")
        if type(val[0]) != str:
            ValLength = len(val)
            tmplist = []
            i = 0
            for ll in val:
                if i % max(ValLength//10, 1) == 0:
                    print("- TF-IDF Vector: Creating Vector {0}/{1} Completed".format(i, ValLength))
                tmplist.append(this.tfv.transform(['' if txt is None else txt for txt in ll]))
                i += 1
            v = np.stack(np.array([x.toarray() for x in tmplist]))
            del(tmplist)
            if reduceshape:
                nsamples = v.shape[0]
                value2 = np.prod(v.shape[1:])
                v = v.reshape((nsamples, value2))
            to_return = v
        else:
            to_return = this.tfv.transform(val)
        print("- TF-IDF Vector: Vector Created")
        return to_return
<train_model> | for dataset in combine:
    dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
train_df.head(10) | Titanic - Machine Learning from Disaster |
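The hard-coded thresholds above are the FareBand quartile edges computed two cells earlier. An equivalent sketch that applies those train-derived edges with pd.cut rather than four masked assignments (assuming Fare still holds the raw fares):

```python
import pandas as pd

fare_bins = [-float('inf'), 7.91, 14.454, 31, float('inf')]  # quartile edges from FareBand
for dataset in combine:
    dataset['Fare'] = pd.cut(dataset['Fare'], bins=fare_bins,
                             labels=[0, 1, 2, 3]).astype(int)
```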
14,262,317 | class SVMModel:
    def __init__(this):
        return
    def Train(this, X, Y):
        print("SVMModel: Training Step will be combined with Predict State")
        this.X = X; this.Y = Y
    def SetVector(this, Vector):
        this.VC = Vector
    def Predict(this, XVal):
        X = this.VC.CreateVector(this.X); Y = this.Y
        del(this.X)
        this.svc = SVC(random_state=seed)
        print("SVMModel: Training")
        this.svc.fit(X, Y); del(X); del(Y)
        print("SVMModel: Finished Training")
        print("SVMModel: Predicting")
        valv = this.VC.CreateVector(XVal)
        del(XVal)
        tR = this.svc.predict(valv)
        del(valv)
        print("SVMModel: Predicted")
        return tR<predict_on_test> | X_train = train_df.drop("Survived", axis=1)
y_train = train_df["Survived"]
print("X_train.shape" ,X_train.shape)
print("y_train.shape" ,y_train.shape)
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2, random_state=111 ) | Titanic - Machine Learning from Disaster |
14,262,317 | def PredictModel(TX, TY, VX, Model):
    print("Initializing Model")
    model = Model
    print("Training Model")
    model.Train(TX, TY)
    print("Predicting")
    global PredictedY
    PredictedY = model.Predict(VX)
    return PredictedY.astype(int)<prepare_x_and_y> | logreg = LogisticRegression()
logreg.fit(X_train, y_train)
Y_pred_lr = logreg.predict(X_test)
Score_lr = accuracy_score(y_test,Y_pred_lr)
print(Score_lr ) | Titanic - Machine Learning from Disaster |
14,262,317 | TrainX, ValX, TrainY, ValY = TTS(Train[SelectedCols], Train['Blinding of intervention'], test_size=0.2, random_state = seed);
_1, _2, _3, ValY2 = TTS(Train[SelectedCols], Train['Blinding of Outcome assessment'], test_size=0.2, random_state = seed);
TrainX = np.array(TrainX);
TrainY = np.array(TrainY);
ValX = np.array(ValX);
ValY = np.array(ValY);
ValY2 = np.array(ValY2);
TrainY2 = Train['Blinding of Outcome assessment']; TrainY2 = np.array(TrainY2);
TestX = Test[SelectedCols]; TestX = np.array(TestX);
Vector = TFIDFVector(np.array(Train[SelectedCols]), TestX);
ValX = Vector.CreateVector(ValX)
SVM = SVMModel() ;
SVM.SetVector(Vector);
PredA = PredictModel(TrainX, TrainY , TestX, SVM);
VPredA = SVM.svc.predict(ValX);
AccA = ScoreAcc(ValY,VPredA)
SVM = SVMModel() ;
SVM.SetVector(Vector);
PredB = PredictModel(TrainX, TrainY2, TestX, SVM);
VPredB = SVM.svc.predict(ValX)
AccB = ScoreAcc(ValY2,VPredB);
def Convert(i):
    if i == 0: return "N"
    if i == 1: return "Q"
    if i == 2: return "P"
FinalPred = [Convert(PredA[i]) + Convert(PredB[i]) for i in range(len(PredA))]<save_to_csv> | svc = SVC()
svc.fit(X_train, y_train)
Y_pred_svc = svc.predict(X_test)
Score_svc = accuracy_score(y_test,Y_pred_svc)
print(Score_svc ) | Titanic - Machine Learning from Disaster |
14,262,317 | Test['Prediction'] = FinalPred
SubmissionTest = Test[["Id","Prediction"]]
SubmissionTest.to_csv("submission.csv", index=False)
SubmissionTest.head(5)<compute_test_metric> | knn = KNeighborsClassifier(n_neighbors = 3)
knn.fit(X_train, y_train)
Y_pred_knn = knn.predict(X_test)
Score_knn = accuracy_score(y_test,Y_pred_knn)
print(Score_knn ) | Titanic - Machine Learning from Disaster |
14,262,317 | def Convert(i):
    if i == 0: return "N"
    if i == 1: return "Q"
    if i == 2: return "P"
FinalPred = [Convert(VPredA[i]) + Convert(VPredB[i]) for i in range(len(VPredA))]
CorrectAns = [Convert(ValY[i]) + Convert(ValY2[i]) for i in range(len(ValY))]
FinAcc = ScoreAcc(CorrectAns, FinalPred) * 100
print("A set = {0:.2f}%".format(AccA))
print("B set = {0:.2f}%".format(AccB))
print("Expected score = {0:.2f}%".format(FinAcc))<set_options> | gaussian = GaussianNB()
gaussian.fit(X_train, y_train)
Y_pred_gnb = gaussian.predict(X_test)
Score_gnb = accuracy_score(y_test,Y_pred_gnb)
print(Score_gnb ) | Titanic - Machine Learning from Disaster |
14,262,317 | print(os.listdir(".. /input"))
warnings.filterwarnings('ignore')
plt.rcParams['figure.figsize'] =(16,9)
sns.set_palette('gist_earth' )<load_from_csv> | perceptron = Perceptron()
perceptron.fit(X_train, y_train)
Y_pred_per = perceptron.predict(X_test)
Score_per = accuracy_score(y_test,Y_pred_per)
print(Score_per ) | Titanic - Machine Learning from Disaster |
14,262,317 | df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
full = pd.concat([df_train, df_test], axis=0, sort=True)
full.set_index('PassengerId', drop=False, inplace=True)
train = full[:891]
display(full.head())<categorify> | linear_svc = LinearSVC()
linear_svc.fit(X_train, y_train)
Y_pred_lsvc = linear_svc.predict(X_test)
Score_lsvc = accuracy_score(y_test,Y_pred_lsvc)
print(Score_lsvc ) | Titanic - Machine Learning from Disaster |
14,262,317 | def parse_Cabin(cabin):
    if type(cabin) == str:
        m = re.search(r'([A-Z])+', cabin)
        return m.group(1)
    else:
        return 'X'
full['Cabin_short'] = full['Cabin'].map(parse_Cabin)<categorify> | sgd = SGDClassifier()
sgd.fit(X_train, y_train)
Y_pred_sgd = sgd.predict(X_test)
Score_sgd = accuracy_score(y_test,Y_pred_sgd)
print(Score_sgd ) | Titanic - Machine Learning from Disaster |
14,262,317 | dict_fare_by_Pclass = dict(full.groupby('Pclass').Fare.mean())
print(dict_fare_by_Pclass)
missing_fare = full.loc[full.Fare == 0, 'Pclass'].map(dict_fare_by_Pclass)
full.loc[full.Fare == 0, 'Fare'] = missing_fare
missing_fare = full.loc[full.Fare.isnull(), 'Pclass'].map(dict_fare_by_Pclass)
full.loc[full.Fare.isnull(), 'Fare'] = missing_fare
display(full.head(180))
<create_dataframe> | decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
Y_pred_dtr = decision_tree.predict(X_test)
Score_dtr = accuracy_score(y_test,Y_pred_dtr)
print(Score_dtr ) | Titanic - Machine Learning from Disaster |
14,262,317 | features = pd.DataFrame()
features['Pclass'] = full['Pclass']
features['Fare'] = full['Fare']
features['Sex'] = full['Sex']<data_type_conversions> | random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, y_train)
Y_pred_rf = random_forest.predict(X_test)
Score_rf = accuracy_score(y_test,Y_pred_rf)
print(Score_rf ) | Titanic - Machine Learning from Disaster |
14,262,317 | features['A5'] = (full['Ticket_short'] == 'A5').astype(int)
features['PC'] = (full['Ticket_short'] == 'PC').astype(int)
print(features)<define_variables> | models = pd.DataFrame({
'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron',
'Stochastic Gradient Decent', 'Linear SVC',
'Decision Tree'],
'Score': [Score_svc, Score_knn, Score_lr,
Score_rf, Score_gnb, Score_per,
Score_sgd, Score_lsvc, Score_dtr]})
models.sort_values(by='Score', ascending=False) | Titanic - Machine Learning from Disaster |
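The scores in this table come from a single 80/20 split, so small differences between models are within noise. A hedged sketch of the same comparison under 5-fold cross-validation, reusing the model objects fitted in the preceding cells:

```python
from sklearn.model_selection import cross_val_score

for name, clf in [('LogReg', logreg), ('SVC', svc), ('KNN', knn),
                  ('RandomForest', random_forest)]:
    cv_acc = cross_val_score(clf, X_train, y_train, cv=5, scoring='accuracy')
    print(f"{name}: {cv_acc.mean():.3f} +/- {cv_acc.std():.3f}")
```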
14,262,317 | dict_Title = {"Capt": "Officer",
"Col": "Officer",
"Major": "Officer",
"Jonkheer": "Royalty",
"Don": "Royalty",
"Sir" : "Royalty",
"Dr": "Officer",
"Rev": "Officer",
"the Countess":"Royalty",
"Dona": "Royalty",
"Mme": "Mrs",
"Mlle": "Miss",
"Ms": "Mrs",
"Mr" : "Mr",
"Mrs" : "Mrs",
"Miss" : "Miss",
"Master" : "Master",
"Lady" : "Royalty"
}
title = title.map(dict_Title)
plt.figure(figsize =(14,6))
sns.violinplot(x=title, y=full['Age']);<categorify> | Y_pred = linear_svc.predict(test_df) | Titanic - Machine Learning from Disaster |
14,262,317 | df_title = pd.DataFrame(title).join(full[['Age','Survived']])
dict_age = df_title.groupby('Name').Age.mean()
idx = full.Age.isnull()
full.loc[idx,'Age'] = df_title.loc[idx, 'Name'].map(dict_age)
display(full.head(6))<data_type_conversions> | submission = pd.read_csv('../input/titanic/gender_submission.csv')
submission['Survived'] = Y_pred
submission.to_csv('submission.csv', index=False) | Titanic - Machine Learning from Disaster |
14,257,711 | features['Title'] = df_title['Name']
features['Child'] = (full['Age'] <= 14).astype(int)
print(features)<feature_engineering> | data_raw = pd.read_csv('../input/titanic/train.csv')
data_val = pd.read_csv('../input/titanic/test.csv')
data1 = data_raw.copy(deep=True)
data_cleaner = [data1, data_val]
Target = ['Survived'] | Titanic - Machine Learning from Disaster |
14,257,711 | def parse_surname(name):
    return name.split(',')[0]
family = pd.DataFrame(full[['Parch','SibSp','Ticket']])
family['Family_size'] = 1 + family.Parch + family.SibSp
family['Surname'] = full.Name.map(parse_surname)
dict_scount = dict(family.groupby('Surname').Family_size.count())
dict_scode = dict(zip(dict_scount.keys(), range(len(dict_scount))))
family['Surname_code'] = family['Surname'].map(dict_scode)
family['Surname_count'] = family['Surname'].map(dict_scount)
display(full[family.Surname == 'Smith'])
print(family)<feature_engineering> | for dataset in data_cleaner:
    dataset.Age.fillna(dataset.Age.median(), inplace=True)
    dataset.Embarked.fillna('S', inplace=True)
    dataset.Fare.fillna(dataset.Fare.median(), inplace=True)
    dataset['Family_members'] = dataset.Parch + dataset.SibSp | Titanic - Machine Learning from Disaster |
14,257,711 | surname2chk = family[family['Family_size'] < family['Surname_count']].Surname.unique()
family['Surname_adj'] = family['Surname']
for s in surname2chk:
    family_regroup = family[family['Surname'] == s]
    fam_code_dict = tick2fam_gen(family_regroup)
    for idx in family_regroup.index:
        curr_ticket = full.loc[idx].Ticket
        fam_code = fam_code_dict[curr_ticket]
        if family_regroup.loc[idx, 'Family_size'] == 1:
            if family_regroup.Ticket.value_counts()[curr_ticket] > 1:
                family.loc[idx, 'Surname_adj'] = s + '-hidfam' + fam_code
            else:
                family.loc[idx, 'Surname_adj'] = s + '-single' + fam_code
        else:
            family.loc[idx, 'Surname_adj'] = s + '-fam' + fam_code
display(family[family.Surname == 'Smith'])<count_unique_values> | data1.drop(['Name', 'PassengerId', 'Ticket', 'SibSp', 'Parch'], axis=1, inplace=True) | Titanic - Machine Learning from Disaster |
14,257,711 | dict_fcount = dict(family.groupby('Surname_adj').Family_size.count())
dict_fcode = dict(zip(dict_fcount.keys(), range(len(dict_fcount))))
family['Family_code'] = family['Surname_adj'].map(dict_fcode)
family['Family_count'] = family['Surname_adj'].map(dict_fcount)
display(family[family.Surname == 'Smith'])
print(f"No.of Family Before Regrouping: {len(family.Surname_code.unique())}")
print(f"No.of Family After Regrouping: {len(family.Family_code.unique())}")<categorify> | for dataset in data_cleaner:
    dataset['Cabin_Allotted'] = np.where(dataset.Cabin.isnull(), 0, 1)
    dataset.drop('Cabin', axis=1, inplace=True) | Titanic - Machine Learning from Disaster |
14,257,711 | group = pd.DataFrame(family[['Surname_code','Surname_count','Family_code','Family_count']])
dict_tcount = dict(full.groupby('Ticket').PassengerId.count())
dict_tcode = dict(zip(dict_tcount.keys(), range(len(dict_tcount))))
group['Ticket_code'] = full.Ticket.map(dict_tcode)
group['Ticket_count'] = full.Ticket.map(dict_tcount)
print(f"No.of Tickets Identified: {len(group['Ticket_code'].unique())}")
display(full[(full.Ticket == 'A/4 48871') | (full.Ticket == 'A/4 48873')])<categorify> | lb = LabelEncoder()
for dataset in data_cleaner:
    dataset['Sex_labeled'] = lb.fit_transform(dataset.Sex)
    dataset['AgeBin'] = pd.qcut(dataset.Age, 3)
    dataset['Age_labeled'] = lb.fit_transform(dataset['AgeBin'])
    dataset['FareBin'] = pd.qcut(dataset.Fare, 4)
    dataset['Fare_labeled'] = lb.fit_transform(dataset['FareBin'])
    dataset['Embarked_labeled'] = lb.fit_transform(dataset.Embarked) | Titanic - Machine Learning from Disaster |
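Caveat on the loop above: qcut and a freshly fitted LabelEncoder run separately on data1 and data_val, so the same age can land in different codes in the two frames. A sketch that derives the bin edges from the training frame only and reuses them (illustrative, not the kernel's original step):

```python
# Compute tercile edges on the training ages, then apply the same edges everywhere.
_, age_edges = pd.qcut(data1.Age, 3, retbins=True)
for dataset in data_cleaner:
    dataset['Age_labeled'] = pd.cut(dataset.Age, bins=age_edges,
                                    labels=False, include_lowest=True)
```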
14,257,711 | def ChainCombineGroups(df, colA, colB):
    data = df.copy()
    search_df = data.copy()
    group_count = 0
    while not search_df.empty:
        pool = search_df.iloc[:1]
        idx = pool.index
        search_df.drop(index=idx, inplace=True)
        flag_init = 1
        update = pd.DataFrame()
        while flag_init or not update.empty:
            flag_init = 0
            pool_A_uniq = np.unique(pool[colA])
            pool_B_uniq = np.unique(pool[colB])
            for col in [colA, colB]:
                idx = []
                for num in np.unique(pool[col]):
                    idx.extend(search_df[search_df[col] == num].index)
                update = search_df.loc[idx]
                pool = pd.concat([pool, update], axis=0)
                search_df = search_df.drop(index=idx)
        data.loc[pool.index, 'Group_'] = group_count
        group_count += 1
    return np.array(data['Group_'].astype(int))<groupby> | print(data1['Age_labeled'].value_counts())
print(data1['Fare_labeled'].value_counts()) | Titanic - Machine Learning from Disaster |
14,257,711 | group['Group_code'] = ChainCombineGroups(group, 'Family_code', 'Ticket_code')
dict_gcount = dict(group.groupby('Group_code').Family_code.count())
group['Group_count'] = group.Group_code.map(dict_gcount)
print(f"Family: {len(family['Family_code'].unique())}")
print(f"Group: {len(group['Ticket_code'].unique())}")
print(f"Combined: {len(group['Group_code'].unique())}\n")
print('An example of grouping both friends and family under the same group:')
display(pd.concat([full['Ticket'], family[['Surname','Family_code']], group[['Ticket_code','Group_code']]], axis=1)[group['Group_code'] == 458])<concatenate> | MLA = [
ensemble.AdaBoostClassifier() ,
ensemble.BaggingClassifier() ,
ensemble.ExtraTreesClassifier() ,
ensemble.GradientBoostingClassifier() ,
ensemble.RandomForestClassifier() ,
neighbors.KNeighborsClassifier() ,
tree.DecisionTreeClassifier() ,
tree.ExtraTreeClassifier() ,
XGBClassifier(objective='binary:logistic', eval_metric='logloss')
]
cv_split = model_selection.ShuffleSplit(n_splits=10, test_size=.2, train_size=.8, random_state=1)
MLA_columns = ['MLA Name', 'MLA Parameters', 'MLA Train Accuracy Mean', 'MLA Test Accuracy Mean', 'MLA Test Accuracy 3*STD', 'MLA Time']
MLA_compare = pd.DataFrame(columns = MLA_columns)
row_index = 0
for alg in MLA:
    MLA_name = alg.__class__.__name__
    MLA_compare.loc[row_index, 'MLA Name'] = MLA_name
    MLA_compare.loc[row_index, 'MLA Parameters'] = str(alg.get_params())
    cv_results = model_selection.cross_validate(alg, data1[data1_X], data1[Target].values.reshape(-1,), cv=cv_split, return_train_score=True)
    MLA_compare.loc[row_index, 'MLA Time'] = cv_results['fit_time'].mean()
    MLA_compare.loc[row_index, 'MLA Train Accuracy Mean'] = cv_results['train_score'].mean()
    MLA_compare.loc[row_index, 'MLA Test Accuracy Mean'] = cv_results['test_score'].mean()
    MLA_compare.loc[row_index, 'MLA Test Accuracy 3*STD'] = cv_results['test_score'].std() * 3
    row_index += 1
MLA_compare | Titanic - Machine Learning from Disaster |
14,257,711 | group_final = pd.concat([family[['Surname_code','Surname_count','Family_code','Family_count']],
group[['Ticket_code','Ticket_count','Group_code','Group_count']],
full['Survived']], axis = 1 )<groupby> | grid_n_estimator = [10, 50, 100, 300]
grid_ratio = [.1,.25,.5,.75, 1.0]
grid_learn = [.01,.03,.05,.1,.25]
grid_max_depth = [1, 2, 3, 4, 5, 6, 7, 8, 10, None]
grid_min_samples = [5, 10,.03,.05,.10]
grid_criterion = ['gini', 'entropy']
grid_seed = [1]
grid_param = [
[{
'learning_rate': grid_learn,
'n_estimators': grid_n_estimator,
'random_state': grid_seed
}],
[{
'max_samples': grid_ratio,
'n_estimators': grid_n_estimator,
'random_state': grid_seed
}],
[{
'n_estimators': grid_n_estimator,
'criterion': grid_criterion,
'max_depth': grid_max_depth,
'random_state': grid_seed
}],
[{
'learning_rate': grid_learn,
'n_estimators': grid_n_estimator,
'max_depth': grid_max_depth,
'random_state': grid_seed
}],
[{
'n_estimators': grid_n_estimator,
'criterion': grid_criterion,
'max_depth': grid_max_depth[:-1],
'random_state': grid_seed,
}],
[{
'n_neighbors': grid_max_depth[:-1],
'weights': ['uniform', 'distance'],
'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute']
}],
[{
'criterion': grid_criterion,
'max_depth': grid_max_depth,
'random_state': grid_seed,
}],
[{
'criterion': grid_criterion,
'max_depth': grid_max_depth,
'random_state': grid_seed,
}],
[{
'learning_rate': grid_learn,
'max_depth': grid_max_depth[:-1],
'n_estimators': grid_n_estimator,
'seed': grid_seed,
'objective': ['binary:logistic'],
'eval_metric': ['logloss']
}]
]
row_index = 0
start_total = time.perf_counter()
for clf, param in zip(MLA, grid_param):
    start = time.perf_counter()
    best_search = model_selection.GridSearchCV(estimator=clf, param_grid=param, cv=cv_split, scoring='roc_auc')
    best_search.fit(data1[data1_X], data1[Target].values.reshape(-1,))
    run = time.perf_counter() - start
    best_param = best_search.best_params_
    print(f'The best parameter for {clf.__class__.__name__} is {best_param} with a runtime of {run:.2f} seconds.')
    print('-'*60)
    clf.set_params(**best_param)
    MLA_name = clf.__class__.__name__
    MLA_compare.loc[row_index, 'MLA Name'] = MLA_name
    MLA_compare.loc[row_index, 'MLA Parameters'] = str(clf.get_params())
    cv_results = model_selection.cross_validate(clf, data1[data1_X], data1[Target].values.reshape(-1,), cv=cv_split, return_train_score=True)
    MLA_compare.loc[row_index, 'MLA Time'] = cv_results['fit_time'].mean()
    MLA_compare.loc[row_index, 'MLA Train Accuracy Mean'] = cv_results['train_score'].mean()
    MLA_compare.loc[row_index, 'MLA Test Accuracy Mean'] = cv_results['test_score'].mean()
    MLA_compare.loc[row_index, 'MLA Test Accuracy 3*STD'] = cv_results['test_score'].std() * 3
    row_index += 1
run_total = time.perf_counter() - start_total
print('-'*60)
print(f'Total optimization time was {run_total / 60:.2f} minutes.')
print('-'*60)
MLA_compare.sort_values(by=['MLA Test Accuracy Mean'], ascending=False, inplace=True)
MLA_compare | Titanic - Machine Learning from Disaster |
14,257,711 | for param in [('Surname_code','Surname_count'),
              ('Family_code','Family_count'),
              ('Ticket_code','Ticket_count'),
              ('Group_code','Group_count')]:
    n_member_survived_by_gp = group_final.groupby(param[0]).Survived.sum()
    n_mem_survived = group_final[param[0]].map(n_member_survived_by_gp)
    n_mem_survived_adj = n_mem_survived - group_final.Survived.apply(lambda x: 1 if x == 1 else 0)
    n_member_dead_by_gp = group_final.groupby(param[0]).Survived.count() - group_final.groupby(param[0]).Survived.sum()
    n_mem_dead = group_final[param[0]].map(n_member_dead_by_gp)
    n_mem_dead_adj = n_mem_dead - group_final.Survived.apply(lambda x: 1 if x == 0 else 0)
    unknown_factor = (group_final[param[1]] - n_mem_survived_adj - n_mem_dead_adj) / group_final[param[1]]
    confidence = 1 - unknown_factor
    key = 'Confidence_member_survived' + '_' + param[0]
    ratio = (1/group_final[param[1]]) * (n_mem_survived_adj - n_mem_dead_adj)
    group_final[key] = confidence * ratio
plt.barh(group_final.corr().Survived[-4:].index, group_final.corr().Survived[-4:])
plt.xlabel('Correlation with Survived');
features['Cf_mem_survived'] = group_final['Confidence_member_survived_Group_code']<feature_engineering> | model = ensemble.RandomForestClassifier(**{'criterion': 'entropy', 'max_depth': 5, 'n_estimators': 50, 'random_state': 1})
model.fit(data1[data1_X], data1[Target].values.reshape(-1,))
predictions = model.predict(data_val[data1_X])
output = pd.DataFrame({'PassengerId': data_val.PassengerId, 'Survived': predictions})
output.to_csv('./my_submission_RandomForestClassifier_tunned_F4.csv', index=False)
print("Your submission was successfully saved!" ) | Titanic - Machine Learning from Disaster |
10,612,785 | features['Parch'] = full['Parch']
features['SibSp'] = full['SibSp']
features['Group_size'] = group['Group_count']
features.head()<categorify> | train_df=pd.read_csv("/kaggle/input/titanic/train.csv")
test_df=pd.read_csv("/kaggle/input/titanic/test.csv")
gender_submission_df=pd.read_csv("/kaggle/input/titanic/gender_submission.csv")
test_PassengerId=test_df["PassengerId"] | Titanic - Machine Learning from Disaster |
10,612,785 | scalar = StandardScaler()
features_z_transformed = features.copy()
continuous = ['Fare']
features_z_transformed[continuous] = scalar.fit_transform(features_z_transformed[continuous])
features_z_transformed.Sex = features_z_transformed.Sex.apply(lambda x: 1 if x == 'male' else 0)
features_final = pd.get_dummies(features_z_transformed)
encoded = list(features_final.columns)
print("{} total features after one-hot encoding.".format(len(encoded)))
features_final_train = features_final[:891]
features_final_test = features_final[891:]<split> | train_df[["Pclass","Survived"]].groupby(["Pclass"], as_index=False).mean().sort_values(by="Survived", ascending=False) | Titanic - Machine Learning from Disaster |
10,612,785 | X_train, X_test, y_train, y_test = train_test_split(features_final_train,
train.Survived,
test_size = 0.2,
random_state = 0 )<choose_model_class> | train_df[["Sex","Survived"]].groupby(["Sex"],as_index=False ).mean().sort_values(by="Survived",ascending=False ) | Titanic - Machine Learning from Disaster |
10,612,785 | clf_A = GradientBoostingClassifier(random_state = 0)
clf_B = LogisticRegression(random_state= 0)
clf_C = RandomForestClassifier(random_state= 0)
samples_100 = len(y_train)
samples_10 = int(len(y_train)/2)
samples_1 = int(len(y_train)/10)
results = {}
for clf in [clf_A, clf_B, clf_C]:
clf_name = clf.__class__.__name__
results[clf_name] = {}
for i, samples in enumerate([samples_1, samples_10, samples_100]):
results[clf_name][i] = \
train_predict(clf, samples, X_train, y_train, X_test, y_test )<init_hyperparams> | train_df[["SibSp","Survived"]].groupby(["SibSp"],as_index=False ).mean().sort_values(by="Survived",ascending=False ) | Titanic - Machine Learning from Disaster |
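Editor's note: `train_predict` is not defined anywhere in this snippet. Judging by the surrounding sample-size sweep, it is the usual benchmarking helper: fit on the first `samples` rows, then score on the held-out split. A plausible minimal version, offered as an assumption about the original rather than the author's exact code:

from time import time
from sklearn.metrics import accuracy_score

def train_predict(learner, sample_size, X_train, y_train, X_test, y_test):
    """Assumed helper: fit on `sample_size` rows, return timings and accuracies."""
    results = {}
    start = time()
    learner.fit(X_train[:sample_size], y_train[:sample_size])
    results['train_time'] = time() - start
    start = time()
    pred_test = learner.predict(X_test)
    results['pred_time'] = time() - start
    results['acc_train'] = accuracy_score(y_train[:sample_size],
                                          learner.predict(X_train[:sample_size]))
    results['acc_test'] = accuracy_score(y_test, pred_test)
    return results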
10,612,785 | warnings.filterwarnings('ignore')
clf = RandomForestClassifier(random_state = 0, oob_score = True)
parameters = {'criterion' :['gini'],
'n_estimators' : [350],
'max_depth':[5],
'min_samples_leaf': [4],
'max_leaf_nodes': [10],
'min_impurity_decrease': [0],
'max_features' : [1]
}
scorer = make_scorer(accuracy_score)
grid_obj = GridSearchCV(clf, parameters, scoring = scorer, cv = 10)
grid_fit = grid_obj.fit(X_train,y_train)
best_clf = grid_fit.best_estimator_
predictions =(clf.fit(X_train, y_train)).predict(X_test)
best_predictions = best_clf.predict(X_test)
print("Unoptimized model
------")
print("Accuracy score on testing data: {:.4f}".format(accuracy_score(y_test, predictions)))
print("Oob score on testing data: {:.4f}".format(clf.oob_score_))
print("
Optimized Model
------")
print("Final accuracy score on the testing data: {:.4f}".format(accuracy_score(y_test, best_predictions)))
print("Final oob score on the testing data: {:.4f}".format(best_clf.oob_score_))
print("
Best Parameters
------")
best_clf<save_to_csv> | train_df[["Parch","Survived"]].groupby(["Parch"],as_index=False ).mean().sort_values(by="Survived",ascending=False ) | Titanic - Machine Learning from Disaster |
10,612,785 | final_predict = best_clf.predict(features_final_test)
prediction = pd.DataFrame(full[891:].PassengerId)
prediction['Survived'] = final_predict.astype('int')
prediction.to_csv('./sample_submission.csv',index = False )<set_options> | train_df.columns[train_df.isnull().any() ] | Titanic - Machine Learning from Disaster |
10,612,785 | %matplotlib inline
warnings.filterwarnings('ignore')
<load_from_csv> | test_df.columns[test_df.isnull().any() ] | Titanic - Machine Learning from Disaster |
10,612,785 | train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
train.describe(include="all" )<count_missing_values> | train_df_len=len(train_df)
train_df=pd.concat([train_df,test_df],axis=0 ).reset_index(drop=True ) | Titanic - Machine Learning from Disaster |
10,612,785 | print('The null of train set')
print(pd.isnull(train ).sum())
print('*'*50)
for col in train.columns:
msg = 'column: {:>10}\t Percent of NaN value: {:.2f}%'.format(col, 100 *(train[col].isnull().sum() / train[col].shape[0]))
print(msg )<count_missing_values> | train_df.columns[train_df.isnull().any() ] | Titanic - Machine Learning from Disaster |
10,612,785 | print('The null of test set')
print(pd.isnull(test ).sum())
print('*'*50)
for col in test.columns:
    msg = 'column: {:>10}\t Percent of NaN value: {:.2f}%'.format(col, 100 *(test[col].isnull().sum() / test[col].shape[0]))
print(msg )<drop_column> | train_df.isnull().sum() | Titanic - Machine Learning from Disaster |
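Editor's note: the two NaN-audit loops above differ only in the frame they inspect (the test-set loop originally reused `train` by mistake, fixed above). A small hypothetical helper removes the duplication:

def nan_report(df, name):
    """Print per-column NaN percentages for `df` (hypothetical refactor)."""
    print(f'The null of {name} set')
    print(df.isnull().sum())
    print('*' * 50)
    for col in df.columns:
        pct = 100 * df[col].isnull().sum() / df.shape[0]
        print(f'column: {col:>10}\t Percent of NaN value: {pct:.2f}%')

# Usage: nan_report(train, 'train'); nan_report(test, 'test')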
10,612,785 | train = train.drop(['Cabin'], axis = 1)
test = test.drop(['Cabin'], axis = 1 )<drop_column> | train_df[train_df["Embarked"].isnull() ] | Titanic - Machine Learning from Disaster |
10,612,785 | train = train.drop(['Ticket'], axis = 1)
test = test.drop(['Ticket'], axis = 1 )<data_type_conversions> | train_df["Embarked"]=train_df["Embarked"].fillna("C" ) | Titanic - Machine Learning from Disaster |
10,612,785 | train = train.fillna({"Embarked": "S"} )<feature_engineering> | train_df[train_df["Embarked"].isnull() ] | Titanic - Machine Learning from Disaster |
10,612,785 | combine = [train, test]
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\.', expand=False)
pd.crosstab(train['Title'], train['Sex'] )<feature_engineering> | train_df[train_df["Fare"].isnull() ] | Titanic - Machine Learning from Disaster |
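Editor's note: the pattern ([A-Za-z]+)\. captures the first run of letters immediately followed by a period, which in these names is the honorific. A quick self-contained check of that behaviour:

import pandas as pd

names = pd.Series(['Braund, Mr. Owen Harris',
                   'Heikkinen, Miss. Laina',
                   'Simonius-Blumer, Col. Oberst Alfons'])
print(names.str.extract(r'([A-Za-z]+)\.', expand=False))
# -> Mr, Miss, Col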
10,612,785 | for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Capt', 'Col',
'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace(['Countess', 'Lady', 'Sir'], 'Royal')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean()<categorify> | train_df["Embarked"]=train_df["Embarked"].fillna("C" ) | Titanic - Machine Learning from Disaster |
10,612,785 | title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Royal": 5, "Rare": 6}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
train.head()<categorify> | train_df.Fare[1043]=train_df[train_df["Pclass"]==3].Fare.mean() | Titanic - Machine Learning from Disaster |
10,612,785 | mr_age = train[train["Title"] == 1]["AgeGroup"].mode()
miss_age = train[train["Title"] == 2]["AgeGroup"].mode()
mrs_age = train[train["Title"] == 3]["AgeGroup"].mode()
master_age = train[train["Title"] == 4]["AgeGroup"].mode()
royal_age = train[train["Title"] == 5]["AgeGroup"].mode()
rare_age = train[train["Title"] == 6]["AgeGroup"].mode()
age_title_mapping = {1: "Young Adult", 2: "Student", 3: "Adult", 4: "Baby", 5: "Adult", 6: "Adult"}
for x in range(len(train["AgeGroup"])) :
if train["AgeGroup"][x] == "Unknown":
train["AgeGroup"][x] = age_title_mapping[train["Title"][x]]
for x in range(len(test["AgeGroup"])) :
if test["AgeGroup"][x] == "Unknown":
test["AgeGroup"][x] = age_title_mapping[test["Title"][x]]<categorify> | train_df[train_df["Age"].isnull() ] | Titanic - Machine Learning from Disaster |
10,612,785 | age_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}
train['AgeGroup'] = train['AgeGroup'].map(age_mapping)
test['AgeGroup'] = test['AgeGroup'].map(age_mapping)
train.head()
train = train.drop(['Age'], axis = 1)
test = test.drop(['Age'], axis = 1 )<drop_column> | train_df.Sex.value_counts() | Titanic - Machine Learning from Disaster |
10,612,785 | train = train.drop(['Name'], axis = 1)
test = test.drop(['Name'], axis = 1 )<categorify> | index_nan_age=list(train_df["Age"][train_df["Age"].isnull() ].index ) | Titanic - Machine Learning from Disaster |