Column      Type           Range
kernel_id   int64          24.2k – 23.3M
prompt      stringlengths  8 – 1.85M
completion  stringlengths  1 – 182k
comp_name   stringlengths  5 – 57
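Each row below pairs a prompt cell with its completion cell, keyed by kernel_id and the competition name (comp_name). A minimal sketch of inspecting a dataset with this schema, assuming it has been exported to a local Parquet file (the filename is a hypothetical placeholder):

import pandas as pd

# Hypothetical local export of the dataset; the filename is a placeholder.
df = pd.read_parquet("kaggle_cells.parquet")

print(df.dtypes)                              # kernel_id is int64, the rest are strings
print(df["comp_name"].value_counts().head())  # rows per competition
print(df["prompt"].str.len().describe())      # string lengths, 8 to 1.85M per the schema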
11,066,913
LR_START = 0.0005 LR_MAX = 0.001 LR_MIN = 0.00015 LR_RAMPUP_EPOCHS = 2 LR_SUSTAIN_EPOCHS = 0 LR_EXP_DECAY = 0.83 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS: lr = LR_MAX else: lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN return lr lr_schedule = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True) plt_lr(EPOCHS)<train_model>
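plt_lr is called above but not defined in these cells; a minimal sketch, assuming it simply plots the schedule that lrfn produces over the training epochs:

import matplotlib.pyplot as plt

def plt_lr(epochs):
    # Evaluate lrfn (defined above) for each epoch and plot the resulting schedule:
    # linear ramp-up, optional sustain, then exponential decay toward LR_MIN.
    lrs = [lrfn(e) for e in range(epochs)]
    plt.plot(range(epochs), lrs, marker="o")
    plt.xlabel("epoch")
    plt.ylabel("learning rate")
    plt.show()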
data['Title'] = data['Title'].replace(['Mr', 'Miss', 'Mrs', 'Master'], [0, 1, 2, 3] )
Titanic - Machine Learning from Disaster
11,066,913
model.fit( get_training_dataset(do_aug=DO_AUG), steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, callbacks=[es_acc, epoch_cb, lr_schedule], verbose=1 )<compute_test_metric>
data['Sex'] = data['Sex'].replace(['male', 'female'], [0, 1] )
Titanic - Machine Learning from Disaster
11,066,913
h = model.history plt_acc(h) plt_loss(h )<feature_engineering>
data['Embarked'] = data['Embarked'].fillna(data['Embarked'].mode()[0]) data['Embarked'] = data['Embarked'].replace(['S', 'C', 'Q'], [0, 1, 2])
Titanic - Machine Learning from Disaster
11,066,913
POST_TRAINING_TIME_START = datetime.now()<predict_on_test>
data['Cabin'] = data['Cabin'].fillna('U').map(lambda x: x[0])
Titanic - Machine Learning from Disaster
11,066,913
def test(tta=None): test_ds = get_test_dataset(ordered=True, tta=tta) print(f'Computing predictions for TTA {tta}...') test_images_ds = test_ds.map(lambda iw, filename: [iw]) model_pred = model.predict(test_images_ds) return model_pred<categorify>
data['Cabin'].value_counts()
Titanic - Machine Learning from Disaster
11,066,913
model_pred = test(tta=None) model_pred_tta_0 = test(tta=0) model_pred_tta_1 = test(tta=1) model_pred_tta_2 = test(tta=2 )<prepare_output>
def unknown_cabin(cabin): if cabin != 'U': return 1 else: return 0 data['Cabin'] = data['Cabin'].apply(unknown_cabin)
Titanic - Machine Learning from Disaster
11,066,913
pred_plain = np.argmax(model_pred, axis=-1) test_ds = get_test_dataset(ordered=True) test_ids_ds = test_ds.map(lambda iw, filename: filename).unbatch() test_ids = next(iter(test_ids_ds.batch(pred_plain.shape[0]))).numpy().astype('U') df_submission = pd.DataFrame({'filename': test_ids, 'category': pred_plain}) df_submission = df_submission.drop_duplicates() df_submission['category'] = df_submission['category'].apply(lambda c: str(c).zfill(2)) df_submission<save_to_csv>
data['FamilySize'] = data['SibSp'] + data['Parch'] + 1 data['IsAlone'] = 1 data.loc[data['FamilySize'] > 1, 'IsAlone'] = 0
Titanic - Machine Learning from Disaster
11,066,913
df_submission.to_csv('submission.csv', index=False) !head submission.csv<feature_engineering>
data = data.drop(['Name', 'Parch', 'SibSp', 'Ticket', 'Last_Name', 'PassengerId'], axis = 1 )
Titanic - Machine Learning from Disaster
11,066,913
pred_tta_0 = np.mean(np.array([model_pred, model_pred_tta_0]), axis=0) pred_tta_0 = np.argmax(pred_tta_0, axis=-1) test_ds = get_test_dataset(ordered=True) test_ids_ds = test_ds.map(lambda iw, filename: filename).unbatch() test_ids = next(iter(test_ids_ds.batch(pred_tta_0.shape[0]))).numpy().astype('U') df_submission_tta_0 = pd.DataFrame({'filename': test_ids, 'category': pred_tta_0}) df_submission_tta_0 = df_submission_tta_0.drop_duplicates() df_submission_tta_0['category'] = df_submission_tta_0['category'].apply(lambda c: str(c).zfill(2)) df_submission_tta_0<save_to_csv>
train = data[:ntrain] test = data[ntrain:]
Titanic - Machine Learning from Disaster
11,066,913
df_submission_tta_0.to_csv('submission_tta_0.csv', index=False) !head submission_tta_0.csv<feature_engineering>
X_test = test X_train = train scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test )
Titanic - Machine Learning from Disaster
11,066,913
pred_tta_all = np.mean(np.array([model_pred, model_pred_tta_0, model_pred_tta_1, model_pred_tta_2]), axis=0) pred_tta_all = np.argmax(pred_tta_all, axis=-1) test_ds = get_test_dataset(ordered=True) test_ids_ds = test_ds.map(lambda iw, filename: filename).unbatch() test_ids = next(iter(test_ids_ds.batch(pred_tta_all.shape[0]))).numpy().astype('U') df_submission_tta_all = pd.DataFrame({'filename': test_ids, 'category': pred_tta_all}) df_submission_tta_all = df_submission_tta_all.drop_duplicates() df_submission_tta_all['category'] = df_submission_tta_all['category'].apply(lambda c: str(c).zfill(2)) df_submission_tta_all<save_to_csv>
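These cells average the per-class probabilities from the plain pass and the TTA passes before taking the argmax. A toy illustration of why the order matters (average first, then argmax), with made-up probabilities:

import numpy as np

# Two hypothetical prediction passes over 2 samples and 3 classes.
p_plain = np.array([[0.6, 0.3, 0.1],
                    [0.2, 0.5, 0.3]])
p_tta = np.array([[0.4, 0.4, 0.2],
                  [0.4, 0.3, 0.3]])

# The passes disagree on the second sample (class 1 vs class 0);
# averaging the probabilities settles it on class 1.
avg = np.mean(np.array([p_plain, p_tta]), axis=0)
print(np.argmax(avg, axis=-1))  # [0 1]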
ran = RandomForestClassifier(random_state=1) knn = KNeighborsClassifier() log = LogisticRegression() xgb = XGBClassifier() gbc = GradientBoostingClassifier() svc = SVC(probability=True) ext = ExtraTreesClassifier() ada = AdaBoostClassifier() gnb = GaussianNB() gpc = GaussianProcessClassifier() bag = BaggingClassifier() models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag] scores = [] for mod in models: mod.fit(X_train, y_train) acc = cross_val_score(mod, X_train, y_train, scoring = "accuracy", cv = 10) scores.append(acc.mean() )
Titanic - Machine Learning from Disaster
11,066,913
df_submission_tta_all.to_csv('submission_tta_all.csv', index=False) !head submission_tta_all.csv<load_pretrained>
results = pd.DataFrame({ 'Model': ['Random Forest', 'K Nearest Neighbour', 'Logistic Regression', 'XGBoost', 'Gradient Boosting', 'SVC', 'Extra Trees', 'AdaBoost', 'Gaussian Naive Bayes', 'Gaussian Process', 'Bagging Classifier'], 'Score': scores}) result_df = results.sort_values(by='Score', ascending=False ).reset_index(drop=True) result_df.head(11 )
Titanic - Machine Learning from Disaster
11,066,913
model.save('model.h5') model.save_weights('model_weights.h5' )<train_model>
fi = {'Features':train.columns.tolist() , 'Importance':xgb.feature_importances_} importance = pd.DataFrame(fi, index=None ).sort_values('Importance', ascending=False )
Titanic - Machine Learning from Disaster
11,066,913
print(f'Post training time : {(datetime.now() - POST_TRAINING_TIME_START).total_seconds()} seconds')<set_options>
fi = {'Features':train.columns.tolist() , 'Importance':np.transpose(log.coef_[0])} importance = pd.DataFrame(fi, index=None ).sort_values('Importance', ascending=False )
Titanic - Machine Learning from Disaster
11,066,913
%matplotlib inline warnings.filterwarnings('ignore') warnings.filterwarnings('ignore', category=DeprecationWarning) pd.options.display.max_columns = 100 sns.set(rc={'figure.figsize':(12,9)}) <import_modules>
gbc_imp = pd.DataFrame({'Feature':train.columns, 'gbc importance':gbc.feature_importances_}) xgb_imp = pd.DataFrame({'Feature':train.columns, 'xgb importance':xgb.feature_importances_}) ran_imp = pd.DataFrame({'Feature':train.columns, 'ran importance':ran.feature_importances_}) ext_imp = pd.DataFrame({'Feature':train.columns, 'ext importance':ext.feature_importances_}) ada_imp = pd.DataFrame({'Feature':train.columns, 'ada importance':ada.feature_importances_}) importances = gbc_imp.merge(xgb_imp, on='Feature' ).merge(ran_imp, on='Feature' ).merge(ext_imp, on='Feature' ).merge(ada_imp, on='Feature') importances['Average'] = importances.mean(axis=1) importances = importances.sort_values(by='Average', ascending=False ).reset_index(drop=True) importances
Titanic - Machine Learning from Disaster
11,066,913
from xgboost import XGBClassifier import xgboost as xgb<import_modules>
fi = {'Features':importances['Feature'], 'Importance':importances['Average']} importance = pd.DataFrame(fi, index=None ).sort_values('Importance', ascending=False )
Titanic - Machine Learning from Disaster
11,066,913
from sklearn.preprocessing import StandardScaler<load_from_csv>
train = train.drop(['Embarked','IsAlone'], axis=1) test = test.drop(['Embarked', 'IsAlone'], axis=1) X_train = train X_test = test X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test )
Titanic - Machine Learning from Disaster
11,066,913
data = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv')<string_transform>
ran = RandomForestClassifier(random_state=1) knn = KNeighborsClassifier() log = LogisticRegression() xgb = XGBClassifier(random_state=1) gbc = GradientBoostingClassifier(random_state=1) svc = SVC(probability=True) ext = ExtraTreesClassifier(random_state=1) ada = AdaBoostClassifier(random_state=1) gnb = GaussianNB() gpc = GaussianProcessClassifier() bag = BaggingClassifier(random_state=1) models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag] scores_v2 = [] for mod in models: mod.fit(X_train, y_train) acc = cross_val_score(mod, X_train, y_train, scoring = "accuracy", cv = 10) scores_v2.append(acc.mean() )
Titanic - Machine Learning from Disaster
11,066,913
df=data datetime=df.Dates.str.split(pat=" ",expand=True) datetime.columns=['Date','Time'] df_test=test datetime_test=df_test.Dates.str.split(pat=" ",expand=True) datetime_test.columns=['Date','Time']<feature_engineering>
results = pd.DataFrame({ 'Model': ['Random Forest', 'K Nearest Neighbour', 'Logistic Regression', 'XGBoost', 'Gradient Boosting', 'SVC', 'Extra Trees', 'AdaBoost', 'Gaussian Naive Bayes', 'Gaussian Process', 'Bagging Classifier'], 'Original Score': scores, 'Score with feature selection': scores_v2}) result_df = results.sort_values(by='Score with feature selection', ascending=False ).reset_index(drop=True) result_df.head(11 )
Titanic - Machine Learning from Disaster
11,066,913
Date=datetime.Date.str.split(pat="-",expand=True) Date.columns=['Year','Month','Day'] Time=datetime.Time.str.split(pat=":",expand=True) Time.columns=['Hour','Minute','Second'] Date_test=datetime_test.Date.str.split(pat="-",expand=True) Date_test.columns=['Year','Month','Day'] Time_test=datetime_test.Time.str.split(pat=":",expand=True) Time_test.columns=['Hour','Minute','Second']<concatenate>
n_estimators = [10, 25, 50, 75, 100] max_depth = [3, None] max_features = [1, 3, 5, 7] min_samples_split = [2, 4, 6, 8, 10] min_samples_leaf = [2, 4, 6, 8, 10] hyperparams = {'n_estimators': n_estimators, 'max_depth': max_depth, 'max_features': max_features, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf} gd=GridSearchCV(estimator = RandomForestClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
df=pd.concat([df,Date,Time],axis=1) df_test=pd.concat([df_test,Date_test,Time_test],axis=1) <drop_column>
n_neighbors = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20] algorithm = ['auto'] weights = ['uniform', 'distance'] leaf_size = [1, 2, 3, 4, 5, 10, 15, 20, 25, 30] hyperparams = {'algorithm': algorithm, 'weights': weights, 'leaf_size': leaf_size, 'n_neighbors': n_neighbors} gd=GridSearchCV(estimator = KNeighborsClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
df=df.drop(labels=['Dates'],axis=1) df_test=df_test.drop(labels=['Dates'],axis=1) <categorify>
penalty = ['l1', 'l2'] C = np.logspace(0, 4, 10) hyperparams = {'penalty': penalty, 'C': C} gd=GridSearchCV(estimator = LogisticRegression() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
le = preprocessing.LabelEncoder()<prepare_x_and_y>
learning_rate = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2] n_estimators = [10, 25, 50, 75, 100, 250, 500, 750, 1000] hyperparams = {'learning_rate': learning_rate, 'n_estimators': n_estimators} gd=GridSearchCV(estimator = XGBClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
le_res=le.fit_transform(df['Category']) y=pd.DataFrame(le_res) y.columns=['Category'] <feature_engineering>
max_depth = [3, 4, 5, 6, 7, 8, 9, 10] min_child_weight = [1, 2, 3, 4, 5, 6] hyperparams = {'max_depth': max_depth, 'min_child_weight': min_child_weight} gd=GridSearchCV(estimator = XGBClassifier(learning_rate=0.0001, n_estimators=10), param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
df["rot60_X"]=(0.5)* df["Y"] +(1.732/2)* df["X"] df["rot60_Y"]=0.5 * df["Y"] -(1.732/2)* df["X"] df_test["rot60_X"]=(0.5)* df_test["Y"] +(1.732/2)* df_test["X"] df_test["rot60_Y"]=0.5 * df_test["Y"] -(1.732/2)* df_test["X"] df["rot45_X"]=0.707 * df["Y"] + 0.707 * df["X"] df["rot45_Y"]=0.707 * df["Y"] - 0.707 * df["X"] df_test["rot45_X"]=0.707 * df_test["Y"] + 0.707 * df_test["X"] df_test["rot45_Y"]=0.707 * df_test["Y"] - 0.707 * df_test["X"] df["rot30_X"]=(1.732/2)* df["Y"] + 0.5 * df["X"] df["rot30_Y"]=(1.732/2)* df["Y"] - 0.5 * df["X"] df_test["rot30_X"]=(1.732/2)* df_test["Y"] + 0.5 * df_test["X"] df_test["rot30_Y"]=(1.732/2)* df_test["Y"] - 0.5 * df_test["X"] <feature_engineering>
gamma = [i*0.1 for i in range(0,5)] hyperparams = {'gamma': gamma} gd=GridSearchCV(estimator = XGBClassifier(learning_rate=0.0001, n_estimators=10, max_depth=3, min_child_weight=1), param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
df["radial60"]=np.sqrt(np.power(df['rot60_X'],2)+ np.power(df['rot60_Y'],2)) df_test["radial60"]=np.sqrt(np.power(df_test['rot60_X'],2)+ np.power(df_test['rot60_Y'],2))<drop_column>
subsample = [0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1] colsample_bytree = [0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1] hyperparams = {'subsample': subsample, 'colsample_bytree': colsample_bytree} gd=GridSearchCV(estimator = XGBClassifier(learning_rate=0.0001, n_estimators=10, max_depth=3, min_child_weight=1, gamma=0), param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
df=df.drop(labels='rot60_X',axis=1) df_test=df_test.drop(labels='rot60_X',axis=1 )<drop_column>
reg_alpha = [1e-5, 1e-2, 0.1, 1, 100] hyperparams = {'reg_alpha': reg_alpha} gd=GridSearchCV(estimator = XGBClassifier(learning_rate=0.0001, n_estimators=10, max_depth=3, min_child_weight=1, gamma=0, subsample=0.6, colsample_bytree=0.9), param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
df=df.drop(labels='rot60_Y',axis=1) df_test=df_test.drop(labels='rot60_Y',axis=1 )<drop_column>
learning_rate = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2] n_estimators = [100, 250, 500, 750, 1000, 1250, 1500] hyperparams = {'learning_rate': learning_rate, 'n_estimators': n_estimators} gd=GridSearchCV(estimator = GradientBoostingClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
df=df.drop(labels='Second',axis=1) df_test=df_test.drop(labels='Second',axis=1 )<feature_engineering>
Cs = [0.001, 0.01, 0.1, 1, 5, 10, 15, 20, 50, 100] gammas = [0.001, 0.01, 0.1, 1] hyperparams = {'C': Cs, 'gamma' : gammas} gd=GridSearchCV(estimator = SVC(probability=True), param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
df['Minute'] = df['Minute'].apply(lambda x: int(x)) df['Minute'] = df['Minute'].apply(lambda x: 'low' if x < 31 else 'high') df_test['Minute'] = df_test['Minute'].apply(lambda x: int(x)) df_test['Minute'] = df_test['Minute'].apply(lambda x: 'low' if x < 31 else 'high')<feature_engineering>
n_estimators = [10, 25, 50, 75, 100] max_depth = [3, None] max_features = [1, 3, 5, 7] min_samples_split = [2, 4, 6, 8, 10] min_samples_leaf = [2, 4, 6, 8, 10] hyperparams = {'n_estimators': n_estimators, 'max_depth': max_depth, 'max_features': max_features, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf} gd=GridSearchCV(estimator = ExtraTreesClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
df['DayOfWeek']= df['DayOfWeek'].apply(lambda x : 'WeekHigh' if x in('Wednesday','Friday')else('WeekMed' if x in('Tuesday','Thursday','Saturday')else 'WeekLow')) df_test['DayOfWeek']= df_test['DayOfWeek'].apply(lambda x : 'WeekHigh' if x in('Wednesday','Friday')else('WeekMed' if x in('Tuesday','Thursday','Saturday')else 'WeekLow')) <feature_engineering>
n_estimators = [10, 25, 50, 75, 100, 125, 150, 200] learning_rate = [0.001, 0.01, 0.1, 0.5, 1, 1.5, 2] hyperparams = {'n_estimators': n_estimators, 'learning_rate': learning_rate} gd=GridSearchCV(estimator = AdaBoostClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
df['Intersection']=df['Address'].apply(lambda x : 1 if '/' in x else 0) df['Block']=df['Address'].apply(lambda x : 1 if 'Block' in x else 0) df_test['Intersection']=df_test['Address'].apply(lambda x : 1 if '/' in x else 0) df_test['Block']=df_test['Address'].apply(lambda x : 1 if 'Block' in x else 0 )<feature_engineering>
n_restarts_optimizer = [0, 1, 2, 3] max_iter_predict = [1, 2, 5, 10, 20, 35, 50, 100] warm_start = [True, False] hyperparams = {'n_restarts_optimizer': n_restarts_optimizer, 'max_iter_predict': max_iter_predict, 'warm_start': warm_start} gd=GridSearchCV(estimator = GaussianProcessClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
address = pd.DataFrame(df['Address'], columns=['Address']) address = address.Address.str.split(pat=" /", expand=True) address.columns = ['Address', 'Intr2'] address = address.Address.str.split(pat=" /", expand=True) address.columns = ['Address'] string = address.iloc[:, 0] string = string.str.strip() address_fram = string.to_frame() temp = address_fram['Address'].astype(str).str[-2:] address = temp.to_frame() address['Address'] = address['Address'].apply(lambda x: x if x in ("ST","AV","LN","DR","BL","HY","CT","RD","PL","PZ","TR","AL","CR","WK","EX","RW") else ("I-80" if x in ("80") else ("HWY" if x in ("WY") else ("WAY" if x in ("AY") else ("TER" if x in ("ER") else ("ALMS" if x in ("MS") else ("MAR" if x in ("AR") else ("PARK" if x in ("RK") else ("STWY" if x in ("WY") else ("VIA" if x in ("NO") else "BLOCK")))))))))) df = df.drop(labels=['Address'], axis=1) df = pd.concat([address, df], axis=1)<feature_engineering>
n_estimators = [10, 15, 20, 25, 50, 75, 100, 150] max_samples = [1, 2, 3, 5, 7, 10, 15, 20, 25, 30, 50] max_features = [1, 3, 5, 7] hyperparams = {'n_estimators': n_estimators, 'max_samples': max_samples, 'max_features': max_features} gd=GridSearchCV(estimator = BaggingClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
11,066,913
address_test = pd.DataFrame(df_test['Address'], columns=['Address']) address_test = address_test.Address.str.split(pat=" /", expand=True) address_test.columns = ['Address', 'Intr2'] address_test = address_test.Address.str.split(pat=" /", expand=True) address_test.columns = ['Address'] string_test = address_test.iloc[:, 0] string_test = string_test.str.strip() address_fram_test = string_test.to_frame() temp_test = address_fram_test['Address'].astype(str).str[-2:] address_test = temp_test.to_frame() address_test['Address'] = address_test['Address'].apply(lambda x: x if x in ("ST","AV","LN","DR","BL","HY","CT","RD","PL","PZ","TR","AL","CR","WK","EX","RW") else ("I-80" if x in ("80") else ("HWY" if x in ("WY") else ("WAY" if x in ("AY") else ("TER" if x in ("ER") else ("ALMS" if x in ("MS") else ("MAR" if x in ("AR") else ("PARK" if x in ("RK") else ("STWY" if x in ("WY") else ("VIA" if x in ("NO") else "BLOCK")))))))))) df_test = df_test.drop(labels=['Address'], axis=1) df_test = pd.concat([address_test, df_test], axis=1)<drop_column>
ran = RandomForestClassifier(n_estimators=50, max_depth=3, max_features=7, min_samples_leaf=8, min_samples_split=6, random_state=1) knn = KNeighborsClassifier(algorithm='auto', leaf_size=3, n_neighbors=10, weights='uniform') log = LogisticRegression(C=21.544346900318832, penalty='l2') xgb = XGBClassifier(learning_rate=0.0001, n_estimators=10, random_state=1) gbc = GradientBoostingClassifier(learning_rate=0.0005, n_estimators=1250, random_state=1) svc = SVC(C=50, gamma=0.01, probability=True) ext = ExtraTreesClassifier(max_depth=3, max_features=7, min_samples_leaf=8, min_samples_split=4, n_estimators=25, random_state=1) ada = AdaBoostClassifier(learning_rate=0.5, n_estimators=25, random_state=1) gpc = GaussianProcessClassifier(max_iter_predict=1) bag = BaggingClassifier(max_features=7, max_samples=50, n_estimators=20,random_state=1) models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag] scores_v3 = [] for mod in models: mod.fit(X_train, y_train) acc = cross_val_score(mod, X_train, y_train, scoring = "accuracy", cv = 10) scores_v3.append(acc.mean() )
Titanic - Machine Learning from Disaster
11,066,913
Id=df['Id'] df=df.drop(['Descript','Resolution','Id'],axis=1) Id_test=df_test['Id'] df_test=df_test.drop(['Descript','Resolution','Id'],axis=1 )<categorify>
results = pd.DataFrame({ 'Model': ['Random Forest', 'K Nearest Neighbour', 'Logistic Regression', 'XGBoost', 'Gradient Boosting', 'SVC', 'Extra Trees', 'AdaBoost', 'Gaussian Naive Bayes', 'Gaussian Process', 'Bagging Classifier'], 'Original Score': scores, 'Score with feature selection': scores_v2, 'Score with tuned parameters': scores_v3}) result_df = results.sort_values(by='Score with tuned parameters', ascending=False ).reset_index(drop=True) result_df.head(11 )
Titanic - Machine Learning from Disaster
11,066,913
le = preprocessing.LabelEncoder()<categorify>
grid_hard = VotingClassifier(estimators = [('Random Forest', ran), ('Logistic Regression', log), ('XGBoost', xgb), ('Gradient Boosting', gbc), ('Extra Trees', ext), ('AdaBoost', ada), ('Gaussian Process', gpc), ('SVC', svc), ('K Nearest Neighbour', knn), ('Bagging Classifier', bag)], voting = 'hard') grid_hard_cv = model_selection.cross_validate(grid_hard, X_train, y_train, cv = 10, return_train_score=True) grid_hard.fit(X_train, y_train) print("Hard voting on train set score mean: {:.2f}".format(grid_hard_cv['train_score'].mean() *100)) print("Hard voting on test set score mean: {:.2f}".format(grid_hard_cv['test_score'].mean() *100))
Titanic - Machine Learning from Disaster
11,066,913
le_res=le.fit_transform(df['DayOfWeek']) Day=pd.DataFrame(le_res) Day.columns=['DayOfWeek'] df=df.drop(labels=['DayOfWeek'],axis=1) df=pd.concat([Day,df],axis=1) le_res_test=le.fit_transform(df_test['DayOfWeek']) Day_test=pd.DataFrame(le_res_test) Day_test.columns=['DayOfWeek'] df_test=df_test.drop(labels=['DayOfWeek'],axis=1) df_test=pd.concat([Day_test,df_test],axis=1 )<categorify>
grid_soft = VotingClassifier(estimators = [('Random Forest', ran), ('Logistic Regression', log), ('XGBoost', xgb), ('Gradient Boosting', gbc), ('Extra Trees', ext), ('AdaBoost', ada), ('Gaussian Process', gpc), ('SVC', svc), ('K Nearest Neighbour', knn), ('Bagging Classifier', bag)], voting = 'soft') grid_soft_cv = model_selection.cross_validate(grid_soft, X_train, y_train, cv = 10, return_train_score=True) grid_soft.fit(X_train, y_train) print("Soft voting on train set score mean: {:.2f}".format(grid_soft_cv['train_score'].mean() *100)) print("Soft voting on test set score mean: {:.2f}".format(grid_soft_cv['test_score'].mean() *100))
Titanic - Machine Learning from Disaster
11,066,913
le_res=le.fit_transform(df['PdDistrict']) District=pd.DataFrame(le_res) District.columns=['District'] df=df.drop(labels=['PdDistrict'],axis=1) df=pd.concat([District,df],axis=1) le_res_test=le.fit_transform(df_test['PdDistrict']) District_test=pd.DataFrame(le_res_test) District_test.columns=['District'] df_test=df_test.drop(labels=['PdDistrict'],axis=1) df_test=pd.concat([District_test,df_test],axis=1) <categorify>
predictions = grid_hard.predict(X_test) submission = pd.concat([pd.DataFrame(passId), pd.DataFrame(predictions)], axis = 'columns') submission.columns = ["PassengerId", "Survived"] submission.to_csv('titanic_submission.csv', header = True, index = False )
Titanic - Machine Learning from Disaster
4,337,324
le_res=le.fit_transform(df['Year']) Year=pd.DataFrame(le_res) Year.columns=['Year'] df=df.drop(labels=['Year'],axis=1) df=pd.concat([Year,df],axis=1) le_res_test=le.fit_transform(df_test['Year']) Year_test=pd.DataFrame(le_res_test) Year_test.columns=['Year'] df_test=df_test.drop(labels=['Year'],axis=1) df_test=pd.concat([Year_test,df_test],axis=1) <categorify>
df = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Titanic - Machine Learning from Disaster
4,337,324
le_res=le.fit_transform(df['Month']) Month=pd.DataFrame(le_res) Month.columns=['Month'] df=df.drop(labels=['Month'],axis=1) df=pd.concat([Month,df],axis=1) le_res_test=le.fit_transform(df_test['Month']) Month_test=pd.DataFrame(le_res_test) Month_test.columns=['Month'] df_test=df_test.drop(labels=['Month'],axis=1) df_test=pd.concat([Month_test,df_test],axis=1) <categorify>
style.use('ggplot')
Titanic - Machine Learning from Disaster
4,337,324
le_res=le.fit_transform(df['Day']) Day=pd.DataFrame(le_res) Day.columns=['Day'] df=df.drop(labels=['Day'],axis=1) df=pd.concat([Day,df],axis=1) le_res_test=le.fit_transform(df_test['Day']) Day_test=pd.DataFrame(le_res_test) Day_test.columns=['Day'] df_test=df_test.drop(labels=['Day'],axis=1) df_test=pd.concat([Day_test,df_test],axis=1) <categorify>
df.drop(['Name'], axis=1, inplace = True) df.drop(['PassengerId'], axis=1, inplace= True) test.drop(['Name'], axis=1, inplace = True)
Titanic - Machine Learning from Disaster
4,337,324
le_res=le.fit_transform(df['Hour']) Hour=pd.DataFrame(le_res) Hour.columns=['Hour'] df=df.drop(labels=['Hour'],axis=1) df=pd.concat([Hour,df],axis=1) le_res_test=le.fit_transform(df_test['Hour']) Hour_test=pd.DataFrame(le_res_test) Hour_test.columns=['Hour'] df_test=df_test.drop(labels=['Hour'],axis=1) df_test=pd.concat([Hour_test,df_test],axis=1) <categorify>
df.fillna(0, inplace = True) test.fillna(0, inplace = True)
Titanic - Machine Learning from Disaster
4,337,324
le_res=le.fit_transform(df['Minute']) Minute=pd.DataFrame(le_res) Minute.columns=['Minute'] df=df.drop(labels=['Minute'],axis=1) df=pd.concat([Minute,df],axis=1) le_res_test=le.fit_transform(df_test['Minute']) Minute_test=pd.DataFrame(le_res_test) Minute_test.columns=['Minute'] df_test=df_test.drop(labels=['Minute'],axis=1) df_test=pd.concat([Minute_test,df_test],axis=1) <feature_engineering>
bins = [0,1, 5, 10, 25, 50, 100] labels = [1,2,3,4,5,6]
Titanic - Machine Learning from Disaster
4,337,324
df["raw_radial"]=np.sqrt(np.power(df['X'],2)+ np.power(df['Y'],2)) df_test["raw_radial"]=np.sqrt(np.power(df_test['X'],2)+ np.power(df_test['Y'],2))<drop_column>
df['Age'] = pd.cut(df['Age'], bins = bins, labels = labels) test['Age'] = pd.cut(test['Age'], bins = bins, labels = labels)
Titanic - Machine Learning from Disaster
4,337,324
le_res=le.fit_transform(df['Category']) cat=pd.DataFrame(le_res) cat.columns=['Category'] df=df.drop(labels=['Category'],axis=1) df=pd.concat([cat,df],axis=1) df.columns<normalization>
df = pd.get_dummies(df, columns = ['Sex'],drop_first = True) test = pd.get_dummies(test, columns = ['Sex'],drop_first = True )
Titanic - Machine Learning from Disaster
4,337,324
xy_scaler = StandardScaler() xy_scaler.fit(df.loc[:, ['X', 'Y']]) xy_scaled = xy_scaler.transform(df.loc[:, ['X', 'Y']]) kmeans = KMeans(n_clusters=26, init='k-means++') kmeans.fit(xy_scaled) xy_scaled_test = xy_scaler.transform(df_test.loc[:, ['X', 'Y']])<predict_on_test>
bins = [0,10,20,30,50, 100, 200 , 250, 300, 350, 400, 450, 500, 550] labels = [1,2,3,4,5,6,7,8,9,10,11,12,13] df['Fare'] = pd.cut(df['Fare'], bins = bins, labels = labels) test['Fare'] = pd.cut(test['Fare'], bins = bins, labels = labels) df['Fare'] = df['Fare'].astype('int32') test['Fare'] = test['Fare'].astype('int32' )
Titanic - Machine Learning from Disaster
4,337,324
df['closest_centers_f'] = kmeans.predict(xy_scaled) df.loc[:, 'label'] = pd.Series(kmeans.labels_) df_test['closest_centers_f'] = kmeans.predict(xy_scaled_test) df_test.loc[:, 'label'] = pd.Series(kmeans.predict(xy_scaled_test))<create_dataframe>
fare_scale = preprocessing.MinMaxScaler() df_fares = df['Fare'].values scaled_fares = df_fares.reshape(-1,1) scaled_fares = fare_scale.fit_transform(scaled_fares) scaled_fares = scaled_fares.flatten() df['Fare'] = pd.Series(scaled_fares) test_fares = test['Fare'].values scaled_fares = test_fares.reshape(-1,1) scaled_fares = fare_scale.transform(scaled_fares) scaled_fares = scaled_fares.flatten() test['Fare'] = pd.Series(scaled_fares )
Titanic - Machine Learning from Disaster
4,337,324
le_res=le.fit_transform(df['Address']) Address=pd.DataFrame(le_res) Address.columns=['Address'] df=df.drop(labels=['Address'],axis=1) df=pd.concat([Address,df],axis=1) le_res=le.fit_transform(df_test['Address']) Address_test=pd.DataFrame(le_res) Address_test.columns=['Address'] df_test=df_test.drop(labels=['Address'],axis=1) df_test=pd.concat([Address_test,df_test],axis=1 )<drop_column>
label_enc = preprocessing.LabelEncoder() df['Cabin'] = df['Cabin'].astype('str') test['Cabin'] = test['Cabin'].astype('str') enc_list = [] for i in df['Cabin'].values: enc_list.append(i) for i in test['Cabin'].values: enc_list.append(i) label_enc.fit(enc_list) df['Cabin'] = label_enc.transform(df['Cabin']) test['Cabin'] = label_enc.transform(test['Cabin'] )
Titanic - Machine Learning from Disaster
4,337,324
df=df[['Address', 'Minute', 'Hour', 'Day', 'Month', 'Year', 'District', 'DayOfWeek', 'X', 'Y', 'rot45_X', 'rot45_Y', 'rot30_X', 'rot30_Y', 'radial60', 'Intersection', 'Block', 'raw_radial', 'closest_centers_f', 'label']] df_test=df_test[['Address', 'Minute', 'Hour', 'Day', 'Month', 'Year', 'District', 'DayOfWeek', 'X', 'Y', 'rot45_X', 'rot45_Y', 'rot30_X', 'rot30_Y', 'radial60', 'Intersection', 'Block', 'raw_radial', 'closest_centers_f', 'label']]<split>
label_enc = preprocessing.LabelEncoder() df['Embarked'] = df['Embarked'].astype('str') df['Embarked'] = label_enc.fit_transform(df['Embarked']) test['Embarked'] = test['Embarked'].astype('str') test['Embarked'] = label_enc.transform(test['Embarked'] )
Titanic - Machine Learning from Disaster
4,337,324
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.20,shuffle=False )<train_model>
label_enc = preprocessing.LabelEncoder() df['Ticket'] = df['Ticket'].astype('str') test['Ticket'] = test['Ticket'].astype('str') enc_list = [] for i in df['Ticket'].values: enc_list.append(i) for i in test['Ticket'].values: enc_list.append(i) label_enc.fit(enc_list) df['Ticket'] = label_enc.transform(df['Ticket']) test['Ticket'] = label_enc.transform(test['Ticket'] )
Titanic - Machine Learning from Disaster
4,337,324
HYPER_PARAMS = { 'learning_rate': 0.02, 'n_estimators':800, 'max_depth': 6, 'subsample': 0.8, 'colsample_bytree': 0.8, 'max_delta_step': 1, 'objective': 'multi:softmax', 'nthread': 4, 'seed': 1747 } model = xgb.XGBClassifier(**HYPER_PARAMS) model.fit(X,y) <predict_on_test>
df = df.astype('float') df.fillna(0, inplace = True) test = test.astype('float') test.fillna(0, inplace = True )
Titanic - Machine Learning from Disaster
4,337,324
y_pred=model.predict_proba(df_test )<categorify>
X = np.array(df.drop(['Survived'], axis = 1)) X = preprocessing.scale(X) Y = np.array(df['Survived'] )
Titanic - Machine Learning from Disaster
4,337,324
temp = data['Category'] le.fit_transform(temp) le.classes_<create_dataframe>
X_train, X_test, Y_train, Y_test = train_test_split(X,Y,stratify = Y, test_size = 0.1, random_state = 31 )
Titanic - Machine Learning from Disaster
4,337,324
y_pred= pd.DataFrame(y_pred, index=Id_test,columns = le.classes_ )<save_to_csv>
model = svm.SVC(kernel = 'poly',degree=3, random_state = 31, gamma = "auto", C = 1) model.fit(X_train, Y_train) predictions = model.predict(X_train) accuracy = accuracy_score(predictions,Y_train) print("Training accuracy = %0.2f" %(accuracy * 100))
Titanic - Machine Learning from Disaster
4,337,324
y_pred.to_csv("submit.csv", float_format = '%.5F' )<choose_model_class>
predictions = model.predict(X_test) accuracy = accuracy_score(predictions,Y_test) print("Testing accuracy = %0.2f" %(accuracy * 100))
Titanic - Machine Learning from Disaster
4,337,324
<train_model>
clf = DecisionTreeClassifier(max_depth=4, random_state = 31) clf.fit(X_train, Y_train) predictions = clf.predict(X_train) accuracy_clf = accuracy_score(predictions, Y_train) print("Training accuracy = %0.2f" %(accuracy_clf * 100))
Titanic - Machine Learning from Disaster
4,337,324
<set_options>
predictions = clf.predict(X_test) accuracy_clf = accuracy_score(predictions, Y_test) print("Testing accuracy = %0.2f" %(accuracy_clf * 100))
Titanic - Machine Learning from Disaster
4,337,324
tf.enable_eager_execution() tf.executing_eagerly()<count_values>
Pid = test['PassengerId'] X_final_test = test.drop(['PassengerId'], axis = 1) X_final_test = preprocessing.scale(X_final_test)
Titanic - Machine Learning from Disaster
4,337,324
for label in data.columns[1:]: print("Distribution of", label) print(data[label].value_counts() )<count_values>
Y_pred = clf.predict(X_final_test )
Titanic - Machine Learning from Disaster
4,337,324
LABELS = data.columns[1:] def build_label(row): return ",".join([LABELS[idx] for idx, val in enumerate(row[1:])if val == 1]) data.apply(lambda x: build_label(x), axis=1 ).value_counts()<split>
test['Survived'] = Y_pred
Titanic - Machine Learning from Disaster
4,337,324
train_data, val_data = train_test_split(data, test_size=0.2, random_state=2019 )<init_hyperparams>
test = test.astype('int32' )
Titanic - Machine Learning from Disaster
4,337,324
IMAGE_SIZE = 224 IMAGENET_MEAN = [0.485, 0.456, 0.406] IMAGENET_STD = [0.229, 0.224, 0.225] BATCH_SIZE = 64 LEARNING_RATE = 0.001 LEARNING_RATE_SCHEDULE_FACTOR = 0.1 LEARNING_RATE_SCHEDULE_PATIENCE = 5 MAX_EPOCHS = 100<categorify>
submission = pd.DataFrame({ "PassengerId": test["PassengerId"], "Survived": test["Survived"] } )
Titanic - Machine Learning from Disaster
4,337,324
def preprocessing_image(image): return image<normalization>
submission.to_csv('submission.csv', index=False )
Titanic - Machine Learning from Disaster
4,337,324
<categorify><EOS>
submission.to_csv('submission.csv', index=False )
Titanic - Machine Learning from Disaster
2,773,958
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<create_dataframe>
%matplotlib inline sns.set(style="whitegrid") warnings.filterwarnings('ignore') pd.set_option('max_colwidth',80 )
Titanic - Machine Learning from Disaster
2,773,958
train_gen = train_datagen.flow_from_dataframe(dataframe=train_data, directory="../input/train/train", x_col="filename", y_col="label", class_mode="categorical", target_size=(IMAGE_SIZE,IMAGE_SIZE), batch_size=BATCH_SIZE)<create_dataframe>
df_train = pd.read_csv('../input/train.csv') df_test = pd.read_csv('../input/test.csv') df_combine = df_train.append(df_test, sort=False)
Titanic - Machine Learning from Disaster
2,773,958
val_gen = train_datagen.flow_from_dataframe(dataframe=val_data, directory="../input/train/train", x_col="filename", y_col="label", class_mode="categorical", shuffle=False, target_size=(IMAGE_SIZE,IMAGE_SIZE), batch_size=BATCH_SIZE)<choose_model_class>
feat_desc = pd.DataFrame({'Description': ['Passenger ID', 'Whether the passenger was survived or not', 'The ticket class that the passenger bought', 'The passenger name', 'The gender of the passenger', 'The age of the passenger', 'The number of siblings/spouses that the passenger has aboard the Titanic', 'The number of parents/children that the passenger has aboard the Titanic', 'The ticket number of the passenger', 'The ticket fare that the passenger paid', 'The cabin number that the passenger boarded', 'The passenger port of embarkation'], 'Values': [df_train[i].unique() for i in df_train.columns], 'Number of unique values': [len(df_train[i].unique())for i in df_train.columns]}, index = df_train.columns) feat_desc
Titanic - Machine Learning from Disaster
2,773,958
base_model = keras.applications.ResNet50(input_shape=(IMAGE_SIZE,IMAGE_SIZE,3), include_top=False, weights='imagenet') base_model.trainable = True model = keras.Sequential([ base_model, keras.layers.GlobalAveragePooling2D() , keras.layers.Dense(len(LABELS), activation='sigmoid') ]) model.summary()<compute_test_metric>
df_train.set_index('PassengerId', inplace=True) df_test.set_index('PassengerId', inplace=True) df_combine = df_train.append(df_test, sort=False )
Titanic - Machine Learning from Disaster
2,773,958
def f1(y_true, y_pred): def recall(y_true, y_pred): true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives /(possible_positives + K.epsilon()) return recall def precision(y_true, y_pred): true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives /(predicted_positives + K.epsilon()) return precision precision = precision(y_true, y_pred) recall = recall(y_true, y_pred) return 2*(( precision*recall)/(precision+recall+K.epsilon()))<define_search_model>
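A quick check of the batch-wise F1 above on toy tensors (values made up; K is the Keras backend imported in the notebook):

import numpy as np
from tensorflow.keras import backend as K

y_true = K.constant(np.array([[1., 0., 1.],
                              [0., 1., 0.]]))
y_pred = K.constant(np.array([[0.9, 0.1, 0.4],
                              [0.2, 0.8, 0.3]]))
# After rounding, 2 of 2 predicted positives are correct and 2 of 3 true
# positives are recovered: precision = 1.0, recall = 2/3, so F1 = 0.8.
print(K.eval(f1(y_true, y_pred)))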
pd.DataFrame({'Number of Missing Values(Training)': df_train.isna().sum() , '% of Missing Values(Training)':(df_train.isna().sum() /df_train.shape[0] * 100 ).round(2), 'Number of Missing Values(Test)': df_test.isna().sum().round(0), '% of Missing Values(Test)':(df_test.isna().sum() /df_test.shape[0] * 100 ).round(2)} )
Titanic - Machine Learning from Disaster
2,773,958
mcp = keras.callbacks.ModelCheckpoint("resnet50.h5", monitor="val_f1", save_best_only=True, save_weights_only=True, verbose=1,mode='max') rlr = keras.callbacks.ReduceLROnPlateau(monitor='val_f1', factor=LEARNING_RATE_SCHEDULE_FACTOR, mode='max', patience=LEARNING_RATE_SCHEDULE_PATIENCE, min_lr=1e-8, verbose=1) callbacks = [mcp, rlr]<train_model>
df_train['Cabin'].fillna('Z', inplace=True) df_test['Cabin'].fillna('Z', inplace=True )
Titanic - Machine Learning from Disaster
2,773,958
device = '/cpu:0' if tfe.num_gpus() == 0 else '/gpu:0' with tf.device(device): steps_per_epoch = train_gen.n // BATCH_SIZE validation_steps = val_gen.n // BATCH_SIZE model.compile(optimizer=keras.optimizers.Adam(lr=LEARNING_RATE), loss='binary_crossentropy', metrics=[f1]) history = model.fit_generator(train_gen, steps_per_epoch=steps_per_epoch, epochs= MAX_EPOCHS, verbose=1, validation_data=val_gen, validation_steps=validation_steps, callbacks=callbacks )<load_from_csv>
df_combine['Cabin'] = df_train['Cabin'].str.get(0) df_combine.groupby('Cabin')['Pclass'].value_counts().to_frame('Count' )
Titanic - Machine Learning from Disaster
2,773,958
test_df = pd.read_csv("../input/sample_submission.csv") test_df.head()<create_dataframe>
df_train.loc[df_train['Embarked'].isna() ]
Titanic - Machine Learning from Disaster
2,773,958
test_gen = train_datagen.flow_from_dataframe(dataframe=test_df, directory="../input/test/test", x_col="filename", class_mode=None, shuffle=False, target_size=(IMAGE_SIZE,IMAGE_SIZE), batch_size=BATCH_SIZE)<load_pretrained>
df_train.loc[df_train['Embarked'].isna() , 'Embarked'] = 'S'
Titanic - Machine Learning from Disaster
2,773,958
model.load_weights("resnet50.h5" )<predict_on_test>
df_test.loc[df_test['Fare'].isna() ]
Titanic - Machine Learning from Disaster
2,773,958
pred = model.predict_generator(test_gen )<feature_engineering>
df_test.loc[df_test['Fare'].isna(), 'Fare'] = df_train['Fare'].mean()
Titanic - Machine Learning from Disaster
2,773,958
for idx, row in test_df.iterrows(): test_df.loc[idx, 'predicted'] = probs2label(pred[idx])<save_to_csv>
df_age = df_train.loc[~df_train['Age'].isna() ] reg = LinearRegression() reg.fit(df_age[['SibSp', 'Pclass']], df_age['Age']) pred_age_train = pd.Series(reg.predict(df_train[['SibSp', 'Pclass']]), index=df_train.index) pred_age_test = pd.Series(reg.predict(df_test[['SibSp', 'Pclass']]), index=df_test.index) df_train['Age'].fillna(pred_age_train, inplace=True) df_test['Age'].fillna(pred_age_test, inplace=True )
Titanic - Machine Learning from Disaster
2,773,958
test_df.to_csv("submission.csv", index=False )<load_from_csv>
df_train.loc[df_train['Age'] < 0]
Titanic - Machine Learning from Disaster
2,773,958
def create_data(x, len_seq): X = [] y = [] for i in range(len(x) - len_seq): X.append(x[i:i+len_seq]) y.append(x[i+len_seq]) return np.array(X), np.array(y) df = pd.read_csv('../input/train.csv', index_col=0) x = df['Min temp.'].values len_seq = 121 X, y = create_data(x, len_seq) X = np.reshape(X, (X.shape[0], X.shape[1], 1)) print(X.shape) print(y.shape)<choose_model_class>
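create_data slides a window of len_seq consecutive values over the series and uses the value right after each window as the target. A toy check of the shapes it produces:

import numpy as np

x_toy = np.arange(6)                 # [0 1 2 3 4 5]
X_toy, y_toy = create_data(x_toy, 3)
print(X_toy)                         # [[0 1 2], [1 2 3], [2 3 4]]
print(y_toy)                         # [3 4 5]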
df_test.loc[df_test['Age'] < 0]
Titanic - Machine Learning from Disaster
2,773,958
model = Sequential() model.add(LSTM(units=64, return_sequences=False, input_shape=(len_seq, 1))) model.add(Dropout(0.2)) model.add(Dense(units=1, activation='linear')) model.compile(loss='mean_squared_error', optimizer='adam') model.summary()<train_model>
df_train.loc[df_train['Age'] < 0, 'Age'] = df_train['Age'].mean() df_test.loc[df_test['Age'] < 0, 'Age'] = df_train['Age'].mean()
Titanic - Machine Learning from Disaster
2,773,958
model.fit(X, y, batch_size=365, epochs=100, validation_split=0.1 )<predict_on_test>
df_train['Age'] = df_train['Age'].round().astype('int') df_test['Age'] = df_test['Age'].round().astype('int' )
Titanic - Machine Learning from Disaster
2,773,958
x = df['Min temp.'].values[len(x)-len_seq:] predict = np.array([]) for i in range(365): X = np.reshape(x,(1, len_seq, 1)) p = model.predict(X)[0] predict = np.append(predict, p) x = np.append(np.delete(x, 0), p) %matplotlib inline plt.plot(predict) plt.show()<save_to_csv>
df_train['Title'] = df_train['Name'].str.split(',', expand=True)[1].str.split('.' ).str.get(0) df_test['Title'] = df_test['Name'].str.split(',', expand=True)[1].str.split('.' ).str.get(0 )
Titanic - Machine Learning from Disaster
2,773,958
submit = pd.read_csv('../input/sampleSubmission.csv') submit['Min temp.'] = predict submit.to_csv('submission.csv', index=False)<set_options>
df_train['Title'].value_counts().to_frame('Number of Passengers' ).T
Titanic - Machine Learning from Disaster
2,773,958
%matplotlib inline warnings.simplefilter(action="ignore", category=FutureWarning )<load_pretrained>
df_train['SibSp+Parch'] = df_train['SibSp'] + df_train['Parch'] df_test['SibSp+Parch'] = df_test['SibSp'] + df_test['Parch']
Titanic - Machine Learning from Disaster
2,773,958
shutil.copyfile(src="../input/redcarpet.py", dst="../working/redcarpet.py")<load_pretrained>
df_train['IsAlone'] = df_train['SibSp+Parch'].map(lambda x: 1 if x == 0 else 0) df_test['IsAlone'] = df_test['SibSp+Parch'].map(lambda x: 1 if x == 0 else 0 )
Titanic - Machine Learning from Disaster
2,773,958
item_file = ".. /input/talent.pkl" item_records, COLUMN_LABELS, READABLE_LABELS, ATTRIBUTES = pickle.load(open(item_file, "rb")) item_df = pd.DataFrame(item_records)[ATTRIBUTES + COLUMN_LABELS].fillna(value=0) ITEM_NAMES = item_df["name"].values ITEM_IDS = item_df["id"].values item_df.head()<load_pretrained>
train_size = df_train.shape[0] test_size = df_test.shape[0] df_combine = df_train.append(df_test, sort=False) df_combine['Last_Name'] = df_combine['Name'].str.split(',', expand=True)[0]
Titanic - Machine Learning from Disaster
2,773,958
s_items = mat_to_sets(item_df[COLUMN_LABELS].values) print("Items", len(s_items)) csr_train, csr_test, csr_input, csr_hidden = pickle.load(open("../input/train_test_mat.pkl", "rb")) m_split = [np.array(csr.todense()) for csr in [csr_train, csr_test, csr_input, csr_hidden]] m_train, m_test, m_input, m_hidden = m_split print("Matrices", len(m_train), len(m_test), len(m_input), len(m_hidden)) s_train, s_test, s_input, s_hidden = pickle.load(open("../input/train_test_set.pkl", "rb")) print("Sets", len(s_train), len(s_test), len(s_input), len(s_hidden))<prepare_output>
fare_df = df_combine.loc[df_combine['SibSp+Parch'] > 0, ['Last_Name', 'Fare', 'SibSp+Parch']] fare_diff =(fare_df.groupby(['Last_Name', 'SibSp+Parch'])['Fare'].aggregate('max')- fare_df.groupby(['Last_Name', 'SibSp+Parch'])['Fare'].aggregate('min')).value_counts() print('Percentage of families with the same fare: {:.2f}%'.format(fare_diff[0]/fare_diff.sum() *100))
Titanic - Machine Learning from Disaster
2,773,958
like_df = pd.DataFrame(m_train, columns=ITEM_NAMES) like_df.head()<import_modules>
train_temp_df = df_combine.iloc[:train_size] family_group_df = train_temp_df.loc[train_temp_df['SibSp+Parch']>0, ['Last_Name', 'Fare', 'SibSp+Parch', 'Survived']].groupby(['Last_Name', 'Fare']) family_df = pd.DataFrame(data=family_group_df.size() , columns=['Size_in_training_dataset']) family_df['Survived_Total'] = family_group_df['Survived'].sum().astype('int') family_df['SibSp+Parch'] = family_group_df['SibSp+Parch'].mean().astype('int') all_survived =(family_df['Size_in_training_dataset'] == family_df['Survived_Total'] ).sum() /len(family_df)*100 print('Families with the whole members survived: {:.1f}%'.format(all_survived)) all_not_survived =(family_df['Survived_Total']==0 ).sum() /len(family_df)*100 print('Families with the whole members not survived: {:.1f}%'.format(all_not_survived))
Titanic - Machine Learning from Disaster
2,773,958
from redcarpet import mapk_score, uhr_score from redcarpet import jaccard_sim, cosine_sim from redcarpet import collaborative_filter, content_filter, weighted_hybrid from redcarpet import get_recs<import_modules>
df_combine['FamilySurvival'] = 0.5 for _, grp_df in df_combine[['Survived', 'Last_Name', 'Fare']].groupby(['Last_Name', 'Fare']): if len(grp_df)> 1: for ind, row in grp_df.iterrows() : smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() if smax == 1: df_combine.loc[ind, 'FamilySurvival'] = 1 elif smin == 0: df_combine.loc[ind, 'FamilySurvival'] = 0
Titanic - Machine Learning from Disaster
2,773,958
from mlxtend.frequent_patterns import apriori<create_dataframe>
train_temp_df = df_combine.iloc[:train_size] ticket_group_df = train_temp_df.groupby('Ticket') ticket_df = pd.DataFrame(data=ticket_group_df.size() , columns=['Size_in_training_dataset']) ticket_df['Survived_Total'] = ticket_group_df['Survived'].sum().astype('int') ticket_df['Not_Family'] = ticket_group_df['Last_Name'].unique().apply(len) ticket_df = ticket_df.loc[(ticket_df['Size_in_training_dataset'] > 1)&(ticket_df['Not_Family'] > 1)] print('Number of groups in training set that is not family: {}'.format(len(ticket_df))) all_survived =(ticket_df['Size_in_training_dataset'] == ticket_df['Survived_Total'] ).sum() /len(ticket_df)*100 print('Families with the whole members survived: {:.1f}%'.format(all_survived)) all_not_survived =(ticket_df['Survived_Total'] == 0 ).sum() /len(ticket_df)*100 print('Families with the whole members not survived: {:.1f}%'.format(all_not_survived))
Titanic - Machine Learning from Disaster
2,773,958
def mine_association_rules(m_train, min_support=0.5): freq_is = apriori(pd.DataFrame(m_train), max_len=2, min_support=min_support) freq_is["len"] = freq_is["itemsets"].apply(lambda s: len(s)) freq_is = freq_is.query("len == 2") if len(freq_is)== 0: return pd.DataFrame([], columns=["a", "b", "ct", "support"]) item_counts = m_train.sum(axis=0) rules = [] for record in freq_is.to_dict(orient="records"): fset = record["itemsets"] a = min(fset) b = max(fset) n = len(m_train) supp = record["support"] all_a = item_counts[a] all_b = item_counts[b] both = supp * n f11 = int(both) f10 = int(all_a - both) f01 = int(all_b - both) f00 = int(n -(f11 + f10 + f01)) rules.append({"a": a, "b": b, "ct":(f11, f10, f01, f00), "support": supp}) rules.append({"a": b, "b": a, "ct":(f11, f01, f10, f00), "support": supp}) rule_df = pd.DataFrame(rules) return rule_df<filter>
for grp, grp_df in df_combine.groupby('Ticket'): if len(grp_df) > 1: for ind, row in grp_df.iterrows(): if row['FamilySurvival'] == 0 or row['FamilySurvival'] == 0.5: smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() if smax == 1: df_combine.loc[ind, 'FamilySurvival'] = 1 elif smin == 0: df_combine.loc[ind, 'FamilySurvival'] = 0 df_train['FamilySurvival'] = df_combine.iloc[:train_size]['FamilySurvival'] df_test['FamilySurvival'] = df_combine.iloc[train_size:]['FamilySurvival']
Titanic - Machine Learning from Disaster
2,773,958
used_rules = all_rules.query("support >= 0.03") len(all_rules), len(used_rules )<compute_train_metric>
df_combine['RealFare'] = 0 for _, grp_df in df_combine.groupby(['Ticket']): grp_size = len(grp_df) for ind, row in grp_df.iterrows() : real_fare = row['Fare']/grp_size df_combine.loc[ind, 'RealFare'] = real_fare df_train['Fare'] = df_combine.iloc[:train_size]['RealFare'] df_test['Fare'] = df_combine.iloc[train_size:]['RealFare']
Titanic - Machine Learning from Disaster
2,773,958
def sets_to_contingency(a, b, N): f11 = len(a.intersection(b)) f10 = len(a) - f11 f01 = len(b) - f11 f00 = N - (f11 + f10 + f01) return f11, f10, f01, f00 def rule_support(f11, f10, f01, f00): N = f11 + f10 + f01 + f00 return f11 / N def rule_confidence(f11, f10, f01, f00): return f11 / (f11 + f10) def rule_interest_factor(f11, f10, f01, f00): N = f11 + f10 + f01 + f00 f1p = f11 + f10 fp1 = f11 + f01 return (N * f11) / (f1p * fp1) def rule_phi_correlation(f11, f10, f01, f00): f1p = f11 + f10 f0p = f01 + f00 fp1 = f11 + f01 fp0 = f10 + f00 num = (f11 * f00) - (f01 * f10) denom = np.sqrt(f1p * fp1 * f0p * fp0) if denom == 0: return 0.0 return num / denom def rule_is_score(f11, f10, f01, f00): intfac = rule_interest_factor(f11, f10, f01, f00) supp = rule_support(f11, f10, f01, f00) return np.sqrt(intfac * supp)<sort_values>
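A quick sanity check of these metrics on a toy contingency table (the counts are made up for illustration):

# 40 baskets contain both items, 10 only a, 20 only b, 30 neither (N = 100).
f11, f10, f01, f00 = 40, 10, 20, 30

print(rule_support(f11, f10, f01, f00))          # 40/100 = 0.40
print(rule_confidence(f11, f10, f01, f00))       # 40/50 = 0.80
print(rule_interest_factor(f11, f10, f01, f00))  # 100*40/(50*60) ≈ 1.33
print(rule_phi_correlation(f11, f10, f01, f00))  # ≈ 0.41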
df_train.dtypes.to_frame(name='Data type' )
Titanic - Machine Learning from Disaster
2,773,958
def rank_association_rules(mined_rules_df, score_fn, score_name="score"): rule_df = pd.DataFrame(mined_rules_df.copy()) rule_df[score_name] = rule_df["ct"].apply(lambda ct: score_fn(*ct)) return rule_df.sort_values(by=score_name, ascending=False )<statistical_test>
df_train.replace({'male': 1, 'female': 0}, inplace=True) df_test.replace({'male': 1, 'female': 0}, inplace=True )
Titanic - Machine Learning from Disaster
2,773,958
def association_filter(rules_df, m_train, s_input, score_fn=rule_support, min_score=0.01, k=10): score_name = "score" ranked_rules = rank_association_rules(rules_df, score_fn=score_fn, score_name=score_name) top_rules_df = ranked_rules.query("{} >= {}".format(score_name, min_score)) rule_records = top_rules_df.to_dict(orient="records") all_recs = [] for likes in s_input: rec_map = {} for rule in rule_records: if rule["a"] in likes and rule["b"] not in likes: if rule["b"] not in rec_map: rec_map[rule["b"]] = 0 rec_map[rule["b"]] += rule[score_name] ranks = sorted(rec_map.items() , key=lambda p: p[1], reverse=True) all_recs.append(ranks[0:k]) return all_recs, top_rules_df<compute_train_metric>
df_train.replace({'S': 0, 'C': 1, 'Q': 2}, inplace=True) df_test.replace({'S': 0, 'C': 1, 'Q': 2}, inplace=True )
Titanic - Machine Learning from Disaster
2,773,958
k_top = 10 print("Metric: Support") rec_scores, rule_df = association_filter(used_rules, m_train, s_input, score_fn=rule_support) print("MAP = {0:.3f}".format(mapk_score(s_hidden, get_recs(rec_scores), k=k_top))) print("UHR = {0:.3f}".format(uhr_score(s_hidden, get_recs(rec_scores), k=k_top))) print("Used {} association rules.".format(len(rule_df))) rule_df.head()<compute_train_metric>
df_train.drop(columns=['Name', 'Ticket'], inplace=True) df_test.drop(columns=['Name', 'Ticket'], inplace=True )
Titanic - Machine Learning from Disaster
2,773,958
print("Metric: Confidence") rec_scores, rule_df = association_filter(used_rules, m_train, s_input, score_fn=rule_confidence) print("MAP = {0:.3f}".format(mapk_score(s_hidden, get_recs(rec_scores), k=k_top))) print("UHR = {0:.3f}".format(uhr_score(s_hidden, get_recs(rec_scores), k=k_top))) print("Used {} association rules.".format(len(rule_df))) rule_df.head()<compute_train_metric>
df_train['Title'].value_counts().to_frame('Number of Passengers' ).T
Titanic - Machine Learning from Disaster
2,773,958
print("Metric: Phi Correlation") rec_scores, rule_df = association_filter(used_rules, m_train, s_input, score_fn=rule_phi_correlation) print("MAP = {0:.3f}".format(mapk_score(s_hidden, get_recs(rec_scores), k=k_top))) print("UHR = {0:.3f}".format(uhr_score(s_hidden, get_recs(rec_scores), k=k_top))) print("Used {} association rules.".format(len(rule_df))) rule_df.head()<compute_train_metric>
df_test['Title'].value_counts().to_frame('Number of Passengers' ).T
Titanic - Machine Learning from Disaster
2,773,958
print("Metric: IS Score") rec_scores, rule_df = association_filter(used_rules, m_train, s_input, score_fn=rule_is_score) print("MAP = {0:.3f}".format(mapk_score(s_hidden, get_recs(rec_scores), k=k_top))) print("UHR = {0:.3f}".format(uhr_score(s_hidden, get_recs(rec_scores), k=k_top))) print("Used {} association rules.".format(len(rule_df))) rule_df.head()<find_best_params>
df_train['Title'] = df_train['Title'].str.strip().map(lambda x: x if x == 'Mr' or x == 'Miss' or x == 'Mrs' or x == 'Master' else 'Other') df_test['Title'] = df_test['Title'].str.strip().map(lambda x: x if x == 'Mr' or x == 'Miss' or x == 'Mrs' or x == 'Master' else 'Other' )
Titanic - Machine Learning from Disaster
2,773,958
def get_all_scores(rec_scores): all_scores = [] for recs in rec_scores: for (item, score) in recs: all_scores.append(score) return all_scores<feature_engineering>
df_train = df_train.join(pd.get_dummies(df_train['Title'], prefix='Title'), how='outer') df_test = df_test.join(pd.get_dummies(df_test['Title'], prefix='Title'), how='outer' )
Titanic - Machine Learning from Disaster
2,773,958
supp_scores, _ = association_filter(used_rules, m_train, s_input, score_fn=rule_support) conf_scores, _ = association_filter(used_rules, m_train, s_input, score_fn=rule_confidence) phi_scores, _ = association_filter(used_rules, m_train, s_input, score_fn=rule_phi_correlation) is_scores, _ = association_filter(used_rules, m_train, s_input, score_fn=rule_is_score )<compute_test_metric>
df_train.drop(columns=['Title'], inplace=True) df_test.drop(columns=['Title'], inplace=True )
Titanic - Machine Learning from Disaster