Dataset schema (column · dtype · value range / string lengths):
kernel_id     int64    24.2k – 23.3M
prompt        string   lengths 8 – 1.85M
completion    string   lengths 1 – 182k
comp_name     string   lengths 5 – 57
1,995,435
df_train = pd.read_csv('../input/train.csv')
df_train.name = 'Training Set'
df_test = pd.read_csv('../input/test.csv')
df_test.name = 'Test Set'
print('Number of Training Examples = {}'.format(df_train.shape[0]))
print('Number of Test Examples = {}'.format(df_test.shape[0]))
print('Training X Shape = {}'.format(df_train.shape))
print('Training y Shape = {}'.format(df_train['target'].shape[0]))
print('Test X Shape = {}'.format(df_test.shape))
print('Test y Shape = {}'.format(df_test.shape[0]))
print(df_train.columns)
print(df_test.columns)<merge>
def impute_age(cols):
    Age = cols[0]
    Pclass = cols[1]
    if pd.isnull(Age):
        if Pclass == 1:
            return 37
        elif Pclass == 2:
            return 29
        else:
            return 24
    else:
        return Age
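The constants 37/29/24 look like per-class age medians baked in by hand. A minimal data-driven sketch (an assumption, not this kernel's code; it presumes df_train carries 'Age' and 'Pclass') would derive them instead:

import pandas as pd

def impute_age_from_medians(df):
    # Hypothetical helper: fill missing ages with the median age of each Pclass.
    medians = df.groupby('Pclass')['Age'].median()
    return df['Age'].fillna(df['Pclass'].map(medians))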
Titanic - Machine Learning from Disaster
1,995,435
df_train_unique = df_train.agg(['nunique']).transpose().sort_values(by='nunique')
df_test_unique = df_test.agg(['nunique']).transpose().sort_values(by='nunique')
df_uniques = df_train_unique.drop('target').reset_index().merge(
    df_test_unique.reset_index(), how='left', right_index=True, left_index=True)
df_uniques.drop(columns=['index_y'], inplace=True)
df_uniques.columns = ['Feature', 'Training Set Unique Count', 'Test Set Unique Count']<create_dataframe>
def impute_fare(cols):
    Fare = cols[0]
    Pclass = cols[1]
    if pd.isnull(Fare):
        if Pclass == 1:
            return 84
        elif Pclass == 2:
            return 20
        else:
            return 13
    else:
        return Fare
Titanic - Machine Learning from Disaster
1,995,435
df_qdist = pd.DataFrame(np.zeros((200, 9)),
                        columns=['Quartile 1 Positives', 'Quartile 2 Positives',
                                 'Quartile 3 Positives', 'Quartile 4 Positives',
                                 'Quartile 1 Positive Percentage', 'Quartile 2 Positive Percentage',
                                 'Quartile 3 Positive Percentage', 'Quartile 4 Positive Percentage',
                                 'Quartile Order'])
features = [col for col in df_train.columns.values.tolist() if col.startswith('var')]
quartiles = np.arange(0, 1, 0.25)
df_qdist.index = features

for i, feature in enumerate(features):
    for j, quartile in enumerate(quartiles):
        target_counts = df_train[np.logical_and(
            df_train[feature] >= df_train[feature].quantile(q=quartile),
            df_train[feature] < df_train[feature].quantile(q=quartile + 0.25))].target.value_counts()
        ones_per = target_counts[1] / (target_counts[0] + target_counts[1]) * 100
        df_qdist.iloc[i, j] = target_counts[1]
        df_qdist.iloc[i, j + 4] = ones_per

pers = df_qdist.columns.tolist()[4:-1]
for i, index in enumerate(df_qdist.index):
    order = df_qdist[pers].iloc[[i]].sort_values(by=index, ascending=False, axis=1).columns
    order_str = ''.join([col[9] for col in order])
    df_qdist.iloc[i, 8] = order_str

df_qdist = df_qdist.round(2)
df_qdist.head(10)<filter>
df_train['Age'] = df_train[['Age', 'Pclass']].apply(impute_age, axis=1)
Titanic - Machine Learning from Disaster
1,995,435
df_qdist[np.logical_or(df_qdist['Quartile Order'].str.startswith('2'),
                       df_qdist['Quartile Order'].str.startswith('3'))]<count_unique_values>
sex = pd.get_dummies(df_train['Sex'], drop_first=True)
embark = pd.get_dummies(df_train['Embarked'], drop_first=True)
df_train = pd.concat([df_train, sex, embark], axis=1)
Titanic - Machine Learning from Disaster
1,995,435
test = df_test.drop(['ID_code'], axis=1).values
unique_count = np.zeros_like(test)
for feature in range(test.shape[1]):
    _, index, count = np.unique(test[:, feature], return_counts=True, return_index=True)
    unique_count[index[count == 1], feature] += 1
real_samples = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]
synth_samples = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]
print('Number of real samples in test set is {}'.format(len(real_samples)))
print('Number of synthetic samples in test set is {}'.format(len(synth_samples)))<data_type_conversions>
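The heuristic above marks a test row as "real" when it contains at least one value that occurs exactly once in its column; rows with no column-unique value are treated as synthetic. A self-contained toy sketch of the same logic (hypothetical 4x2 data, not from the competition):

import numpy as np

toy = np.array([[1.0, 5.0],
                [1.0, 5.0],
                [2.0, 5.0],   # 2.0 is unique in column 0 -> "real"
                [1.0, 6.0]])  # 6.0 is unique in column 1 -> "real"
unique_count = np.zeros_like(toy)
for col in range(toy.shape[1]):
    _, idx, cnt = np.unique(toy[:, col], return_index=True, return_counts=True)
    unique_count[idx[cnt == 1], col] += 1
print(np.argwhere(unique_count.sum(axis=1) > 0)[:, 0])  # -> [2 3]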
df_train["Family"] = df_train["SibSp"] + df_train["Parch"] + 1 df_train['Single'] = df_train['Family'].map(lambda s: 1 if s == 1 else 0) df_train['SmallF'] = df_train['Family'].map(lambda s: 1 if s == 2 else 0) df_train['MedF'] = df_train['Family'].map(lambda s: 1 if 3 <= s <= 4 else 0) df_train['LargeF'] = df_train['Family'].map(lambda s: 1 if s >= 5 else 0) df_train['Senior'] = df_train['Age'].map(lambda s:1 if s>60 else 0 )
Titanic - Machine Learning from Disaster
1,995,435
features = [col for col in df_train.columns if col.startswith('var')]
df_all = pd.concat([df_train, df_test.iloc[real_samples]])  # .ix is removed in modern pandas; .iloc is the positional equivalent

for feature in features:
    temp = df_all[feature].value_counts(dropna=True)
    df_train[feature + 'vc'] = df_train[feature].map(temp).map(lambda x: min(10, x)).astype(np.uint8)
    df_test[feature + 'vc'] = df_test[feature].map(temp).map(lambda x: min(10, x)).astype(np.uint8)
    df_train[feature + 'sum'] = ((df_train[feature] - df_all[feature].mean())
                                 * df_train[feature + 'vc'].map(lambda x: int(x > 1))).astype(np.float32)
    df_test[feature + 'sum'] = ((df_test[feature] - df_all[feature].mean())
                                * df_test[feature + 'vc'].map(lambda x: int(x > 1))).astype(np.float32)
    df_train[feature + 'sum2'] = (df_train[feature] * df_train[feature + 'vc'].map(lambda x: int(x > 2))).astype(np.float32)
    df_test[feature + 'sum2'] = (df_test[feature] * df_test[feature + 'vc'].map(lambda x: int(x > 2))).astype(np.float32)
    df_train[feature + 'sum3'] = (df_train[feature] * df_train[feature + 'vc'].map(lambda x: int(x > 4))).astype(np.float32)
    df_test[feature + 'sum3'] = (df_test[feature] * df_test[feature + 'vc'].map(lambda x: int(x > 4))).astype(np.float32)

print('Training set shape after creating magic features: {}'.format(df_train.shape))
print('Test set shape after creating magic features: {}'.format(df_test.shape))<categorify>
dataset_title = [i.split(",")[1].split(".")[0].strip() for i in df_test["Name"]]
df_test["Title"] = pd.Series(dataset_title)
df_test["Title"] = df_test["Title"].replace(['Lady', 'the Countess', 'Countess', 'Capt', 'Col', 'Don',
                                             'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
df_test["Title"] = df_test["Title"].map({"Master": 0, "Miss": 1, "Ms": 1, "Mme": 1,
                                         "Mlle": 1, "Mrs": 1, "Mr": 2, "Rare": 3})
df_test["Title"] = df_test["Title"].astype(int)
df_test.drop(labels=["Name"], axis=1, inplace=True)
Titanic - Machine Learning from Disaster
1,995,435
def augment(x, y, t=2):
    xs, xn = [], []
    for i in range(t // 2):
        mask = y == 0
        x1 = x[mask].copy()
        ids = np.arange(x1.shape[0])
        featnum = x1.shape[1] // 200 - 1
        for c in range(200):
            np.random.shuffle(ids)
            cols = [c] + [200 + featnum * c + idc for idc in range(featnum)]
            x1[:, cols] = x1[ids][:, cols]
        xn.append(x1)
    for i in range(t):
        mask = y > 0
        x1 = x[mask].copy()
        ids = np.arange(x1.shape[0])
        featnum = x1.shape[1] // 200 - 1
        for c in range(200):
            np.random.shuffle(ids)
            cols = [c] + [200 + featnum * c + idc for idc in range(1)]
            x1[:, cols] = x1[ids][:, cols]
        xs.append(x1)
    xs = np.vstack(xs)
    xn = np.vstack(xn)
    ys = np.ones(xs.shape[0])
    yn = np.zeros(xn.shape[0])
    x = np.vstack([x, xs, xn])
    y = np.concatenate([y, ys, yn])
    return x, y<feature_engineering>
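A hedged usage sketch of the augment function above. The arrays are synthetic stand-ins shaped the way the function expects (200 raw 'var' columns followed by one engineered block of 200), not real competition data:

import numpy as np

X = np.random.rand(100, 400)           # hypothetical: 200 vars + 200 engineered
y = np.array([0] * 90 + [1] * 10)      # imbalanced labels, as in Santander
X_aug, y_aug = augment(X, y, t=2)      # shuffles columns within each class
print(X_aug.shape, y_aug.shape)        # rows grow: originals + shuffled copies

Because each column is shuffled independently within a class, marginal distributions are preserved while cross-column correlations are broken, which is why it works for this data but not in general.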
df_test['Age'] = df_test[['Age', 'Pclass']].apply(impute_age, axis=1)
sex = pd.get_dummies(df_test['Sex'], drop_first=True)
embark = pd.get_dummies(df_test['Embarked'], drop_first=True)
df_test = pd.concat([df_test, sex, embark], axis=1)
df_test['Fare'].fillna(value=df_test['Fare'].median(), inplace=True)
Titanic - Machine Learning from Disaster
1,995,435
<categorify>
df_test['Fare'] = df_test[['Fare', 'Pclass']].apply(impute_fare, axis=1)
Titanic - Machine Learning from Disaster
1,995,435
class KMeansFeaturizer:
    def __init__(self, k, target_scale=5.0, random_state=None):
        self.k = k
        self.target_scale = target_scale
        self.random_state = random_state
        self.encoder = OneHotEncoder(categories='auto').fit(np.array(range(k)).reshape(-1, 1))

    def fit(self, X, y=None):
        if y is None:
            kmeans = KMeans(n_clusters=self.k, n_init=20, random_state=self.random_state)
            kmeans.fit(X)
        else:
            # Append the (scaled) target as an extra column so clusters respect it.
            Xy = np.hstack((X, y[:, np.newaxis] * self.target_scale))
            kmeans_pretrain = KMeans(n_clusters=self.k, n_init=20, random_state=self.random_state)
            kmeans_pretrain.fit(Xy)
            # Drop the target column from the pretrained centers; the original
            # [:, :2] only worked for 2-feature inputs.
            kmeans = KMeans(n_clusters=self.k, init=kmeans_pretrain.cluster_centers_[:, :-1],
                            n_init=1, max_iter=1)
            kmeans.fit(X)
        self.kmeans = kmeans
        self.cluster_centers_ = kmeans.cluster_centers_  # was the undefined name km_model
        return self

    def transform(self, X, y=None):
        clusters = self.kmeans.predict(X)
        return self.encoder.transform(clusters.reshape(-1, 1))

    def fit_transform(self, X, y=None):
        self.fit(X, y)
        return self.transform(X, y)<feature_engineering>
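A minimal usage sketch of the featurizer above, under the assumption that the class and sklearn's KMeans/OneHotEncoder are in scope; X_demo and y_demo are synthetic stand-ins:

import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import OneHotEncoder

X_demo = np.random.randn(300, 2)
y_demo = (X_demo[:, 0] > 0).astype(int)
kmf = KMeansFeaturizer(k=4, target_scale=5.0, random_state=0)
cluster_onehots = kmf.fit_transform(X_demo, y_demo)
print(cluster_onehots.shape)  # (300, 4): sparse one-hot cluster membership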
df_test["Fare"] = df_test["Fare"].map(lambda i: np.log(i)if i > 0 else 0 )
Titanic - Machine Learning from Disaster
1,995,435
transform_feature(df=df_train, feature='var_108', transformation=np.round, decimals=2)<init_hyperparams>
df_test["Family"] = df_test["SibSp"] + df_test["Parch"] + 1
Titanic - Machine Learning from Disaster
1,995,435
gbdt_param = {
    'objective': 'binary',
    'boosting': 'gbdt',
    'learning_rate': 0.01,
    'num_leaves': 15,
    'tree_learner': 'serial',
    'num_threads': 8,
    'seed': SEED,
    'max_depth': -1,
    'min_data_in_leaf': 50,
    'min_sum_hessian_in_leaf': 10,
    'bagging_fraction': 0.6,
    'bagging_freq': 5,
    'feature_fraction': 0.05,
    'lambda_l1': 1.,
    'bagging_seed': SEED,
    'verbosity': 1,  # the original key had a stray trailing space ('verbosity ')
    'boost_from_average': False,
    'metric': 'auc',
}<prepare_x_and_y>
df_test['Single'] = df_test['Family'].map(lambda s: 1 if s == 1 else 0)
df_test['SmallF'] = df_test['Family'].map(lambda s: 1 if s == 2 else 0)
df_test['MedF'] = df_test['Family'].map(lambda s: 1 if 3 <= s <= 4 else 0)
df_test['LargeF'] = df_test['Family'].map(lambda s: 1 if s >= 5 else 0)
df_test['Senior'] = df_test['Age'].map(lambda s: 1 if s > 60 else 0)
Titanic - Machine Learning from Disaster
1,995,435
predictors = df_train.columns.tolist()[2:]
X_test = df_test[predictors]
n_splits = 5
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED)
oof = df_train[['ID_code', 'target']]
oof['predict'] = 0
predictions = df_test[['ID_code']]
val_aucs = []
feature_importance_df = pd.DataFrame()<split>
df_train['Person'] = df_train[['Age', 'Sex']].apply(get_person, axis=1)
df_test['Person'] = df_test[['Age', 'Sex']].apply(get_person, axis=1)
person_dummies_train = pd.get_dummies(df_train['Person'])
person_dummies_train.columns = ['Child', 'Female', 'Male']
person_dummies_train.drop(['Male'], axis=1, inplace=True)
person_dummies_test = pd.get_dummies(df_test['Person'])
person_dummies_test.columns = ['Child', 'Female', 'Male']
person_dummies_test.drop(['Male'], axis=1, inplace=True)
df_train = df_train.join(person_dummies_train)
df_test = df_test.join(person_dummies_test)
df_train.drop(['Person'], axis=1, inplace=True)
df_test.drop(['Person'], axis=1, inplace=True)
Titanic - Machine Learning from Disaster
1,995,435
for fold, (train_ind, val_ind) in enumerate(skf.split(df_train, df_train.target.values)):
    X_train, y_train = df_train.iloc[train_ind][predictors], df_train.iloc[train_ind]['target']
    X_valid, y_valid = df_train.iloc[val_ind][predictors], df_train.iloc[val_ind]['target']
    N = 1
    p_valid, yp = 0, 0
    for i in range(N):
        print('Fold {} - N {}'.format(fold + 1, i + 1))
        X_t, y_t = augment(X_train.values, y_train.values)
        weights = np.array([0.8] * X_t.shape[0])
        weights[:X_train.shape[0]] = 1.0
        print('Shape of X_train after augment: {}\nShape of y_train after augment: {}'.format(X_t.shape, y_t.shape))
        X_t = pd.DataFrame(X_t)
        X_t = X_t.add_prefix('var_')
        trn_data = lgb.Dataset(X_t, label=y_t, weight=weights)
        val_data = lgb.Dataset(X_valid, label=y_valid)
        evals_result = {}
        lgb_clf = lgb.train(gbdt_param, trn_data, 100000,
                            valid_sets=[trn_data, val_data],
                            early_stopping_rounds=5000,
                            verbose_eval=1000,
                            evals_result=evals_result)
        p_valid += lgb_clf.predict(X_valid)
        yp += lgb_clf.predict(X_test)
    fold_importance_df = pd.DataFrame()
    fold_importance_df["feature"] = predictors
    fold_importance_df["importance"] = lgb_clf.feature_importance()
    fold_importance_df["fold"] = fold + 1
    feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
    oof['predict'][val_ind] = p_valid / N
    val_score = roc_auc_score(y_valid, p_valid)
    val_aucs.append(val_score)
    predictions['fold{}'.format(fold + 1)] = yp / N<compute_test_metric>
df_train.drop('male', axis=1, inplace=True)
df_test.drop('male', axis=1, inplace=True)
Titanic - Machine Learning from Disaster
1,995,435
mean_auc = np.mean(val_aucs)
std_auc = np.std(val_aucs)
all_auc = roc_auc_score(oof['target'], oof['predict'])
print('Mean AUC: {}, std: {}. All AUC: {}.'.format(mean_auc, std_auc, all_auc))<save_to_csv>
df_train.drop(['Cabin', 'Ticket'], axis=1, inplace=True)
df_test.drop(['Ticket', 'Cabin'], axis=1, inplace=True)
Titanic - Machine Learning from Disaster
1,995,435
predictions['target'] = np.mean(
    predictions[[col for col in predictions.columns if col not in ['ID_code', 'target']]].values, axis=1)
predictions.to_csv('predictions.csv', index=None)
sub_df = pd.DataFrame({"ID_code": df_test["ID_code"].values})
sub_df["target"] = predictions['target']
sub_df.to_csv("lgb_submission.csv", index=False)
oof.to_csv('lgb_oof.csv', index=False)<load_from_csv>
df_train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
df_test.drop(['Sex', 'Embarked'], axis=1, inplace=True)
Titanic - Machine Learning from Disaster
1,995,435
warnings.simplefilter(action='ignore', category=FutureWarning)
train_df = pd.read_csv('../input/santander-customer-transaction-prediction/train.csv')
test_df = pd.read_csv('../input/santander-customer-transaction-prediction/test.csv')
synthetic_samples_indexes = np.load('../input/fakedata/synthetic_samples_indexes.npy')
private_LB = np.load('../input/fakedata/private_LB.npy')
public_LB = np.load('../input/fakedata/public_LB.npy')
full = pd.concat([train_df, pd.concat([test_df.loc[private_LB], test_df.loc[public_LB]], sort=False)], sort=False)
for feat in ['var_' + str(x) for x in range(200)]:
    count_values = full.groupby(feat)[feat].count()
    train_df['new_' + feat] = count_values.loc[train_df[feat]].values
    test_df['new_' + feat] = count_values.loc[test_df[feat]].values
print('full_df has {} rows and {} columns'.format(full.shape[0], full.shape[1]))<init_hyperparams>
X_train, X_test, y_train, y_test = train_test_split(df_train.drop('Survived', axis=1),
                                                    df_train['Survived'],
                                                    test_size=0.15, random_state=101)
Titanic - Machine Learning from Disaster
1,995,435
seed = 2319
param = {
    'num_leaves': 8,
    'min_data_in_leaf': 17,
    'learning_rate': 0.01,
    'min_sum_hessian_in_leaf': 9.67,
    'bagging_fraction': 0.8329,
    'bagging_freq': 2,
    'feature_fraction': 1,
    'lambda_l1': 0.6426,
    'lambda_l2': 0.3067,
    'min_gain_to_split': 0.02832,
    'max_depth': -1,
    'seed': seed,
    'feature_fraction_seed': seed,
    'bagging_seed': seed,
    'drop_seed': seed,
    'data_random_seed': seed,
    'objective': 'binary',
    'boosting_type': 'gbdt',
    'verbosity': -1,
    'metric': 'auc',
    'is_unbalance': True,
    'save_binary': True,
    'boost_from_average': 'false',
    'num_threads': 8,
}
features = [c for c in train_df.columns if c not in ['ID_code', 'target']]
test_size = 0.3
X_train, X_test, y_train, y_test = train_test_split(train_df[features], train_df['target'],
                                                    test_size=test_size, random_state=42)
iterations = 110
y_hat = np.zeros([int(200000 * test_size), 200])
i = 0
for feature in ['var_' + str(x) for x in range(200)]:
    feat_choices = [feature, 'new_' + feature]
    lgb_train = lgb.Dataset(X_train[feat_choices], y_train)
    gbm = lgb.train(param, lgb_train, iterations, verbose_eval=-1)
    y_hat[:, i] = gbm.predict(X_test[feat_choices], num_iteration=gbm.best_iteration)
    i += 1
sub_preds = y_hat.sum(axis=1)
score = roc_auc_score(y_test, sub_preds)
print('Your CV score is', score)<init_hyperparams>
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
plt.figure(figsize=(18, 18))
plot_tree(dt, filled=True);
Titanic - Machine Learning from Disaster
1,995,435
iterations = 126
param = {'bagging_fraction': 0.7693, 'bagging_freq': 2, 'lambda_l1': 0.7199, 'lambda_l2': 1.992,
         'learning_rate': 0.009455, 'max_depth': 3, 'min_data_in_leaf': 22, 'min_gain_to_split': 0.06549,
         'min_sum_hessian_in_leaf': 18.55, 'num_leaves': 20, 'feature_fraction': 1, 'save_binary': True,
         'seed': 2319, 'feature_fraction_seed': 2319, 'bagging_seed': 2319, 'drop_seed': 2319,
         'data_random_seed': 2319, 'objective': 'binary', 'boosting_type': 'gbdt', 'verbosity': -1,
         'metric': 'auc', 'is_unbalance': True, 'boost_from_average': 'false', 'num_threads': 6}<init_hyperparams>
XGB = XGBClassifier(max_depth=4, learning_rate=0.005, n_estimators=500, n_jobs=-1, min_child_weight=2)
XGB.fit(X_train, y_train)
Titanic - Machine Learning from Disaster
1,995,435
iterations = 126
param = {'bagging_fraction': 0.7693, 'bagging_freq': 2, 'lambda_l1': 0.7199, 'lambda_l2': 1.992,
         'learning_rate': 0.009455, 'max_depth': 3, 'min_data_in_leaf': 22, 'min_gain_to_split': 0.06549,
         'min_sum_hessian_in_leaf': 18.55, 'num_leaves': 20, 'feature_fraction': 1, 'save_binary': True,
         'seed': 2319, 'feature_fraction_seed': 2319, 'bagging_seed': 2319, 'drop_seed': 2319,
         'data_random_seed': 2319, 'objective': 'binary', 'boosting_type': 'gbdt', 'verbosity': -1,
         'metric': 'auc', 'is_unbalance': True, 'boost_from_average': 'false', 'num_threads': 6}
features = [c for c in train_df.columns if c not in ['ID_code', 'target']]
test_size = 0.3
X_train, X_test, y_train, y_test = train_test_split(train_df[features], train_df['target'],
                                                    test_size=test_size, random_state=42)
iterations = 110  # note: overrides the 126 set above, as in the original cell
y_hat = np.zeros([int(200000 * test_size), 200])
i = 0
for feature in ['var_' + str(x) for x in range(200)]:
    feat_choices = [feature, 'new_' + feature]
    lgb_train = lgb.Dataset(X_train[feat_choices], y_train)
    gbm = lgb.train(param, lgb_train, iterations, verbose_eval=-1)
    y_hat[:, i] = gbm.predict(X_test[feat_choices], num_iteration=gbm.best_iteration)
    i += 1<compute_test_metric>
y_pred = pd.DataFrame(XGB.predict(df_test))
y_pred['Survived'] = y_pred[0]
y_pred.drop(0, axis=1, inplace=True)
y_pred['PassengerId'] = df_test['PassengerId']
y_pred_xgb = y_pred
y_pred.to_csv('titanic_pred_xgb.csv', index=False)
Titanic - Machine Learning from Disaster
1,995,435
weights = []
for col in range(200):
    if roc_auc_score(y_test, y_hat[:, col]) >= 0.5:
        weights.append(roc_auc_score(y_test, y_hat[:, col]))
    else:
        weights.append(0)
weights = np.array(weights)
weights = (weights - weights.mean()) / weights.mean()
weights += 1
sub_preds_regular = y_hat.sum(axis=1)
sub_preds_weighted = (y_hat * weights).sum(axis=1)
score_regular = roc_auc_score(y_test, sub_preds_regular)
score_weighted = roc_auc_score(y_test, sub_preds_weighted)
print('Your unweighted score is:', score_regular)
print('Your weighted score is:', score_weighted)<compute_test_metric>
Scaler1 = StandardScaler()
Scaler2 = StandardScaler()
X_train_scaled = Scaler1.fit_transform(X_train)
df_test_scaled = Scaler2.fit_transform(df_test)
Titanic - Machine Learning from Disaster
1,995,435
test_data_length = len(y_test)
validation_length = int(test_data_length / 4)
weights = []
for col in range(200):
    if roc_auc_score(y_test[:validation_length], y_hat[:validation_length, col]) >= 0.5:
        weights.append(roc_auc_score(y_test[:validation_length], y_hat[:validation_length, col]))
    else:
        weights.append(0)
weights = np.array(weights)
weights = (weights - weights.mean()) / weights.mean()
weights += 1
sub_preds_regular = y_hat[validation_length:].sum(axis=1)
sub_preds_weighted = (y_hat[validation_length:] * weights).sum(axis=1)
score_regular = roc_auc_score(y_test[validation_length:], sub_preds_regular)
score_weighted = roc_auc_score(y_test[validation_length:], sub_preds_weighted)
print('Your unweighted score is:', score_regular)
print('Your weighted score is:', score_weighted)
if score_weighted > score_regular:
    print('Your weights ARE NOT overfitting')
else:
    print('Your weights ARE overfitting')<concatenate>
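A side note on the weighting used in the two cells above: subtracting the mean, dividing by it, and adding one is algebraically just dividing each per-variable AUC by the mean AUC. A quick standalone check:

import numpy as np

# (w - mean)/mean + 1 == w/mean, element-wise.
w = np.array([0.52, 0.55, 0.50])
print((w - w.mean()) / w.mean() + 1)
print(w / w.mean())  # identical output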
logmodel = LogisticRegression(C=10).fit(X_train, y_train)
y_pred = pd.DataFrame(logmodel.predict(df_test))
y_pred['Survived'] = y_pred[0]
y_pred.drop(0, axis=1, inplace=True)
y_pred['PassengerId'] = df_test['PassengerId']
y_pred_lr = y_pred
y_pred.to_csv('titanic_pred_logistic.csv', index=False)
Titanic - Machine Learning from Disaster
1,995,435
pd.options.mode.chained_assignment = None
min_n_unique_full = 2000
min_n_unique_train = int(min_n_unique_full * 2 / 3)
min_n_unique_test = int(min_n_unique_full * 1 / 3)
full = pd.concat([train_df, pd.concat([test_df.loc[private_LB], test_df.loc[public_LB]], sort=False)], sort=False)
true_test_df = pd.concat([test_df.loc[private_LB], test_df.loc[public_LB]], sort=False)
count = 1
for feat in ['var_' + str(x) for x in range(200)]:
    if count % 50 == 1:
        print('Processing reached', feat)
    n_unique_list_full = list(set(full['new_' + feat]))
    n_unique_list_full.sort()
    for i in range(len(n_unique_list_full)):
        n_unique_full = n_unique_list_full[i]
        len_n_unique_full = len(full[feat][full['new_' + feat] == n_unique_full])
        len_n_unique_train = len(train_df[feat][train_df['new_' + feat] == n_unique_full])
        len_n_unique_test = len(true_test_df[feat][true_test_df['new_' + feat] == n_unique_full])
        if (len_n_unique_full < min_n_unique_full or len_n_unique_train < min_n_unique_train
                or len_n_unique_test < min_n_unique_test):
            try:
                full['new_' + feat][full['new_' + feat] == n_unique_full] = n_unique_list_full[i + 1]
                train_df['new_' + feat][train_df['new_' + feat] == n_unique_full] = n_unique_list_full[i + 1]
                true_test_df['new_' + feat][true_test_df['new_' + feat] == n_unique_full] = n_unique_list_full[i + 1]
                test_df['new_' + feat][test_df['new_' + feat] == n_unique_full] = n_unique_list_full[i + 1]
            except:
                continue
    for i in reversed(range(len(n_unique_list_full))):
        n_unique_full = n_unique_list_full[i]
        len_n_unique_full = len(full[feat][full['new_' + feat] == n_unique_full])
        len_n_unique_train = len(train_df[feat][train_df['new_' + feat] == n_unique_full])
        len_n_unique_test = len(true_test_df[feat][true_test_df['new_' + feat] == n_unique_full])
        if (len_n_unique_full < min_n_unique_full or len_n_unique_train < min_n_unique_train
                or len_n_unique_test < min_n_unique_test):
            try:
                full['new_' + feat][full['new_' + feat] == n_unique_full] = n_unique_list_full[i - 1]
                train_df['new_' + feat][train_df['new_' + feat] == n_unique_full] = n_unique_list_full[i - 1]
                true_test_df['new_' + feat][true_test_df['new_' + feat] == n_unique_full] = n_unique_list_full[i - 1]
                test_df['new_' + feat][test_df['new_' + feat] == n_unique_full] = n_unique_list_full[i - 1]
            except:
                continue
    count += 1
print('Done!')<define_variables>
RFC = RandomForestClassifier(n_estimators=500, max_depth=9, min_samples_split=3)
RFC.fit(X_train, y_train)
Titanic - Machine Learning from Disaster
1,995,435
for n in [2, 53, 81, 111, 121, 126, 130, 146]:
    print('Variable', 'var_' + str(n))
    plt.figure(figsize=(15, 8))
    count = 1
    for n_unique in list(set(train_df['new_var_' + str(n)]))[:6]:
        var_tar_0 = train_df['var_' + str(n)][(train_df['new_var_' + str(n)] == n_unique)
                                              & (train_df['target'] == 0)]
        var_tar_1 = train_df['var_' + str(n)][(train_df['new_var_' + str(n)] == n_unique)
                                              & (train_df['target'] == 1)]
        samples_0 = len(var_tar_0)
        samples_1 = len(var_tar_1)
        if samples_0 < 20 or samples_1 < 20:
            continue
        samples_percentage = np.round((samples_0 + samples_1) * 100 / 200000)
        plt.subplot(2, 3, count)
        sns.kdeplot(var_tar_0, shade=False, color="red", label='target = 0')
        sns.kdeplot(var_tar_1, shade=False, color="blue", label='target = 1')
        plt.title('Count = {} represents {}% of the data'.format(n_unique, samples_percentage))
        plt.xlabel('Feature Values')
        plt.ylabel('Probability')
        count += 1
    plt.tight_layout()
    plt.show()<define_variables>
y_pred = pd.DataFrame(RFC.predict(df_test))
y_pred['Survived'] = y_pred[0]
y_pred.drop(0, axis=1, inplace=True)
y_pred['PassengerId'] = df_test['PassengerId']
y_pred_rf = y_pred
y_pred.to_csv('titanic_pred_rfc.csv', index=False)
Titanic - Machine Learning from Disaster
1,995,435
for n in [117, 120]:
    print('Variable', 'var_' + str(n))
    plt.figure(figsize=(15, 8))
    count = 1
    for n_unique in list(set(train_df['new_var_' + str(n)]))[:6]:
        var_tar_0 = train_df['var_' + str(n)][(train_df['new_var_' + str(n)] == n_unique)
                                              & (train_df['target'] == 0)]
        var_tar_1 = train_df['var_' + str(n)][(train_df['new_var_' + str(n)] == n_unique)
                                              & (train_df['target'] == 1)]
        samples_0 = len(var_tar_0)
        samples_1 = len(var_tar_1)
        if samples_0 < 20 or samples_1 < 20:
            continue
        samples_percentage = np.round((samples_0 + samples_1) * 100 / 200000)
        plt.subplot(2, 3, count)
        sns.kdeplot(var_tar_0, shade=False, color="red", label='target = 0')
        sns.kdeplot(var_tar_1, shade=False, color="blue", label='target = 1')
        plt.title('Count = {} represents {}% of the data'.format(n_unique, samples_percentage))
        plt.xlabel('Feature Values')
        plt.ylabel('Probability')
        count += 1
    plt.tight_layout()
    plt.show()<filter>
lgb = LGBMClassifier(learning_rate=0.01, max_depth=5, n_estimators=500, num_leaves=3).fit(X_train, y_train)
Titanic - Machine Learning from Disaster
1,995,435
n_unique = 1
n = 126
var_126_tar_0 = train_df['var_' + str(n)][(train_df['new_var_' + str(n)] == n_unique) & (train_df['target'] == 0)]
var_126_tar_1 = train_df['var_' + str(n)][(train_df['new_var_' + str(n)] == n_unique) & (train_df['target'] == 1)]
n = 81
var_81_tar_0 = train_df['var_' + str(n)][(train_df['new_var_' + str(n)] == n_unique) & (train_df['target'] == 0)]
var_81_tar_1 = train_df['var_' + str(n)][(train_df['new_var_' + str(n)] == n_unique) & (train_df['target'] == 1)]
fig = plt.subplots()
sns.kdeplot(var_126_tar_0, shade=False, color="r", label='var_126_tar_0')
sns.kdeplot(var_126_tar_1, shade=False, color="blue", label='var_126_tar_1')
sns.kdeplot(var_81_tar_0, shade=False, color="orange", label='var_81_tar_0')
sns.kdeplot(var_81_tar_1, shade=False, color="black", label='var_81_tar_1')
plt.title('PDFs of VAR 126 and 81 BEFORE transformation')
plt.xlabel('Feature Values')
plt.ylabel('Probability')
plt.show()

n_unique = 1
n = 126
var_126_tar_0 = train_df['var_' + str(n)][(train_df['new_var_' + str(n)] == n_unique) & (train_df['target'] == 0)] * 2.486 - 18.5
var_126_tar_1 = train_df['var_' + str(n)][(train_df['new_var_' + str(n)] == n_unique) & (train_df['target'] == 1)] * 2.486 - 18.5
n = 81
var_81_tar_0 = train_df['var_' + str(n)][(train_df['new_var_' + str(n)] == n_unique) & (train_df['target'] == 0)]
var_81_tar_1 = train_df['var_' + str(n)][(train_df['new_var_' + str(n)] == n_unique) & (train_df['target'] == 1)]
fig = plt.subplots()
sns.kdeplot(var_126_tar_0, shade=False, color="r", label='var_126_tar_0')
sns.kdeplot(var_126_tar_1, shade=False, color="blue", label='var_126_tar_1')
sns.kdeplot(var_81_tar_0, shade=False, color="orange", label='var_81_tar_0')
sns.kdeplot(var_81_tar_1, shade=False, color="black", label='var_81_tar_1')
plt.title('PDFs of VAR 126 and 81 AFTER transformation')
plt.xlabel('Feature Values')
plt.ylabel('Probability')
plt.show()<init_hyperparams>
y_pred = pd.DataFrame(lgb.predict(df_test))
y_pred['Survived'] = y_pred[0]
y_pred.drop(0, axis=1, inplace=True)
y_pred['PassengerId'] = df_test['PassengerId']
y_pred_lgb = y_pred
y_pred.to_csv('titanic_pred_lgb.csv', index=False)
Titanic - Machine Learning from Disaster
1,995,435
param = {'bagging_fraction': 0.5166, 'bagging_freq': 3, 'lambda_l1': 3.968, 'lambda_l2': 1.263,
         'learning_rate': 0.00141, 'max_depth': 3, 'min_data_in_leaf': 17, 'min_gain_to_split': 0.2525,
         'min_sum_hessian_in_leaf': 19.55, 'num_leaves': 20, 'feature_fraction': 1, 'save_binary': True,
         'seed': 2319, 'feature_fraction_seed': 2319, 'bagging_seed': 2319, 'drop_seed': 2319,
         'data_random_seed': 2319, 'objective': 'binary', 'boosting_type': 'gbdt', 'verbosity': -1,
         'metric': 'auc', 'is_unbalance': True, 'boost_from_average': 'false', 'num_threads': 6}
folds = StratifiedKFold(n_splits=4, shuffle=False, random_state=2319)
target = train_df['target']
y_hat = np.zeros([200000, 200])
test_hat = np.zeros([200000, 200])
i = 0
for feature in ['var_' + str(x) for x in range(200)]:
    print(feature)
    feat_choices = [feature, 'new_' + feature]
    oof = np.zeros(len(train_df))
    predictions = np.zeros(len(test_df))
    for fold_, (trn_idx, val_idx) in enumerate(folds.split(train_df[feat_choices].values, target.values)):
        trn_data = lgb.Dataset(train_df.iloc[trn_idx][feat_choices], label=target.iloc[trn_idx])
        val_data = lgb.Dataset(train_df.iloc[val_idx][feat_choices], label=target.iloc[val_idx])
        clf = lgb.train(param, trn_data, 126, valid_sets=[trn_data, val_data], verbose_eval=-1)
        oof[val_idx] = clf.predict(train_df.iloc[val_idx][feat_choices], num_iteration=clf.best_iteration)
        predictions += clf.predict(test_df[feat_choices], num_iteration=clf.best_iteration) / folds.n_splits
    print("CV score: {:<8.5f}".format(roc_auc_score(target, oof)))
    y_hat[:, i] = oof
    test_hat[:, i] = predictions
    i += 1
weights = []
for col in range(200):
    if roc_auc_score(target, y_hat[:, col]) >= 0.5:
        weights.append(roc_auc_score(target, y_hat[:, col]))
    else:
        weights.append(0)
weights = np.array(weights)
weights = (weights - weights.mean()) / weights.mean()
weights += 1
sub_preds = (y_hat * weights).sum(axis=1) / 200
print('Your CV score is', roc_auc_score(target, sub_preds))
sub_preds_test = (test_hat * weights).sum(axis=1) / 200
sub = pd.DataFrame({"ID_code": test_df.ID_code.values})
sub["target"] = sub_preds_test
sub.to_csv('submission.csv', index=False)<set_options>
print("XGB train score: ",round(XGB.score(X_train,y_train),2), " XGB test score: ",round(XGB.score(X_test,y_test),2)) print("Log-Reg.train score: ",round(logmodel.score(X_train,y_train),2)," Log-Reg.test score: ",round(logmodel.score(X_test,y_test),2)) print("Random Forest's train score: ",round(RFC.score(X_train,y_train),2), " Random Forest test score: ",round(RFC.score(X_test,y_test),2)) print("LGBM train score: ",round(lgb.score(X_train,y_train),2), " LGB Model test score: ",round(lgb.score(X_test,y_test),2))
Titanic - Machine Learning from Disaster
1,995,435
warnings.filterwarnings('ignore')
PATH = "../input/"
N_SPLITS = 10
SEED_SKF = 4221<split>
y_valid_xgb = XGB.predict(X_test)
y_valid_log = logmodel.predict(X_test)
y_valid_rfc = RFC.predict(X_test)
y_valid_lgb = lgb.predict(X_test)
Titanic - Machine Learning from Disaster
1,995,435
def merge_train_test(df_train, df_test):
    if "target" not in df_test.columns.values:
        df_test["target"] = -1
    res = pd.concat([df_train, df_test])
    res.reset_index(inplace=True, drop=True)
    return res

def split_train_test(df):
    df_train = df[df["target"] >= 0]
    df_test = df[df["target"] <= -1]
    df_train.reset_index(inplace=True, drop=True)
    df_test.reset_index(inplace=True, drop=True)
    assert list(df_train["ID_code"].values) == [f"train_{i}" for i in range(200000)]
    assert list(df_test["ID_code"].values) == [f"test_{i}" for i in range(200000)]
    return df_train, df_test<load_from_csv>
fpr_xgb, tpr_xgb, thresholds_xgb = roc_curve(y_test, y_valid_xgb)
roc_auc_xgb = auc(fpr_xgb, tpr_xgb)
fpr_log, tpr_log, thresholds_log = roc_curve(y_test, y_valid_log)
roc_auc_log = auc(fpr_log, tpr_log)
fpr_rfc, tpr_rfc, thresholds_rfc = roc_curve(y_test, y_valid_rfc)
roc_auc_rfc = auc(fpr_rfc, tpr_rfc)
fpr_lgb, tpr_lgb, thresholds_lgb = roc_curve(y_test, y_valid_lgb)
roc_auc_lgb = auc(fpr_lgb, tpr_lgb)
Titanic - Machine Learning from Disaster
1,995,435
%%time
train_df = pd.read_csv(PATH + "train.csv")
test_df = pd.read_csv(PATH + "test.csv")<categorify>
y_pred_final = y_pred
y_pred_final['Survived'] = round(0.25 * y_pred_lgb['Survived'] + 0.25 * y_pred_rf['Survived']
                                 + 0.25 * y_pred_xgb['Survived'] + 0.25 * y_pred_lr['Survived'])
y_pred_final['PassengerId'] = df_test['PassengerId']
y_pred_final['Survived'] = y_pred_final['Survived'].astype(int)
y_pred_final.to_csv('titanic_pred_final.csv', index=False)
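Since the four per-model 'Survived' columns are 0/1, rounding their 0.25-weighted average amounts to a majority vote; note that Python 3 (and numpy) round halves to even, so a 2-2 tie (mean 0.5) resolves to 0. A tiny illustration:

# Majority vote via rounded average of four binary votes (illustration only).
print(round(0.25 * (1 + 1 + 0 + 0)))  # tie -> 0.5 -> rounds to 0 (banker's rounding)
print(round(0.25 * (1 + 1 + 1 + 0)))  # 0.75 -> 1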
Titanic - Machine Learning from Disaster
1,995,435
<categorify><EOS>
y_pred_final['Survived'].value_counts()
Titanic - Machine Learning from Disaster
394,817
<SOS> metric: categorization accuracy. Kaggle data source: titanic-machine-learning-from-disaster<merge>
warnings.filterwarnings('ignore')
%matplotlib inline
Titanic - Machine Learning from Disaster
394,817
df_merged = merge_train_test(train_df, test_df)
df_merged.tail()<categorify>
training_set = pd.read_csv('../input/train.csv')
testing_set = pd.read_csv('../input/test.csv')
pID = testing_set['PassengerId']
Titanic - Machine Learning from Disaster
394,817
%%time
count_enc = [None] * 200
df_real = df_merged[df_merged["target"] != -2]
for v in range(200):
    enc = CountEncoder()
    enc.fit(df_real[f"var_{v}"])
    count_enc[v] = enc.transform(df_merged[f"var_{v}"])
for v in range(200):
    df_merged[f"cnt_{v}"] = count_enc[v]
del df_real<split>
print(training_set.isnull().sum(), "\n")
print(testing_set.isnull().sum())
Titanic - Machine Learning from Disaster
394,817
train_df, test_df = split_train_test(df_merged)
target = train_df['target']
gc.collect()
print(train_df.shape)
test_df.head()<init_hyperparams>
for dataset in [training_set, testing_set]:
    dataset['Age'].fillna(dataset['Age'].median(), inplace=True)
    dataset['Embarked'].fillna(dataset['Embarked'].mode()[0], inplace=True)
    dataset['Fare'].fillna(dataset['Fare'].median(), inplace=True)
drop_column = ['PassengerId', 'Cabin', 'Ticket']
training_set.drop(drop_column, axis=1, inplace=True)
testing_set.drop(drop_column, axis=1, inplace=True)
print(training_set.isnull().sum())
print("-" * 10)
print(testing_set.isnull().sum())
Titanic - Machine Learning from Disaster
394,817
param = { "objective": "binary", "boost": "gbdt", "metric": "auc", "boost_from_average": False, "learning_rate": 0.01, "num_leaves": 5, "max_depth": -1, "tree_learner": "serial", "feature_fraction": 1.0, "bagging_freq": 5, "bagging_fraction": 0.4, "min_data_in_leaf": 80, "min_sum_hessian_in_leaf": 10.0, "verbosity": 1, "seed": 44000, }<define_variables>
for dataset in [training_set, testing_set]:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
    dataset['IsAlone'] = 1
    dataset.loc[dataset['FamilySize'] > 1, 'IsAlone'] = 0  # frame-level .loc avoids chained assignment
    dataset['FareBin'] = pd.qcut(dataset['Fare'], 4)
    dataset['AgeBin'] = pd.cut(dataset['Age'].astype(int), 5)
training_set.info()
testing_set.info()
training_set.sample(10)
Titanic - Machine Learning from Disaster
394,817
target = train_df['target']
df_merged_cut = [df_merged[[f"var_{v}", f"cnt_{v}"]] for v in range(200)]
gc.collect()<split>
training_set[training_set["Name"].str.contains("Master")]
Titanic - Machine Learning from Disaster
394,817
%%time
skf = StratifiedKFold(n_splits=N_SPLITS, shuffle=True, random_state=SEED_SKF)
oof = np.zeros(len(train_df))
predictions = np.zeros(len(test_df))
feature_importance_df = pd.DataFrame()
for fold_, (trn_idx, val_idx) in enumerate(skf.split(train_df.values, target.values)):
    print("fold n°{}".format(fold_))
    df_meta = df_merged[["ID_code", "target"]]
    trn_X, trn_y = train_df.iloc[trn_idx], target.iloc[trn_idx]
    val_X, val_y = train_df.iloc[val_idx], target.iloc[val_idx]
    for v in range(200):
        print(f"var {v}")
        features = [f"var_{v}", f"cnt_{v}"]
        trn_data = lgb.Dataset(trn_X[features], label=trn_y)
        val_data = lgb.Dataset(val_X[features], label=val_y)
        num_round = 1000000
        clf = lgb.train(param, trn_data, num_round, valid_sets=[trn_data, val_data],
                        verbose_eval=1000, early_stopping_rounds=100)
        df_meta[f"{v}_meta"] = clf.predict(df_merged_cut[v], num_iteration=clf.best_iteration).astype(np.float32)
    df_meta.to_pickle(f"fold_{fold_}_meta.pickle")<init_hyperparams>
label = LabelEncoder()
for dataset in [training_set, testing_set]:
    dataset['Sex_Code'] = label.fit_transform(dataset['Sex'])
    dataset['Embarked_Code'] = label.fit_transform(dataset['Embarked'])
    dataset['AgeBin_Code'] = label.fit_transform(dataset['AgeBin'])
    dataset['FareBin_Code'] = label.fit_transform(dataset['FareBin'])
Target = ['Survived']
training_set_x = ['Sex', 'Pclass', 'Embarked', 'SibSp', 'Parch', 'Age', 'Fare', 'FamilySize', 'IsAlone']
training_set_x_calc = ['Sex_Code', 'Pclass', 'Embarked_Code', 'SibSp', 'Parch', 'Age', 'Fare']
training_set_xy = Target + training_set_x
print('Original X Y: ', training_set_xy, '\n')
training_set_x_bin = ['Sex_Code', 'Pclass', 'Embarked_Code', 'FamilySize', 'AgeBin_Code', 'FareBin_Code']
training_set_xy_bin = Target + training_set_x_bin
print('Bin X Y: ', training_set_xy_bin, '\n')
training_set_dummy = pd.get_dummies(training_set[training_set_x], drop_first=True)
training_set_x_dummy = training_set_dummy.columns.tolist()
training_set_xy_dummy = Target + training_set_x_dummy
print('Dummy X Y: ', training_set_xy_dummy, '\n')
training_set_dummy.head()
Titanic - Machine Learning from Disaster
394,817
param = { "objective": "binary", "boost": "gbdt", "metric": "auc", "boost_from_average": "false", "learning_rate": 0.01, "num_leaves": 2, "max_depth": -1, "tree_learner": "serial", "feature_fraction": 0.5, "bagging_freq": 5, "bagging_fraction": 0.4, "min_data_in_leaf": 80, "min_sum_hessian_in_leaf": 10.0, "verbosity": 1, "seed": 44000, }<split>
y = training_set['Survived']
X = training_set_dummy
Titanic - Machine Learning from Disaster
394,817
%%time
skf = StratifiedKFold(n_splits=N_SPLITS, shuffle=True, random_state=SEED_SKF)
oof = np.zeros(len(train_df))
predictions = np.zeros(len(test_df))
feature_importance_df = pd.DataFrame()
for fold_, (trn_idx, val_idx) in enumerate(skf.split(train_df.values, target.values)):
    print("fold n°{}".format(fold_))
    df_meta = pd.read_pickle(f"fold_{fold_}_meta.pickle")
    train_df, test_df = split_train_test(df_meta)
    features = [f"{v}_meta" for v in range(200)]
    trn_data = lgb.Dataset(train_df.iloc[trn_idx][features], label=target.iloc[trn_idx])
    val_data = lgb.Dataset(train_df.iloc[val_idx][features], label=target.iloc[val_idx])
    num_round = 1000000
    clf = lgb.train(param, trn_data, num_round, valid_sets=[trn_data, val_data],
                    verbose_eval=1000, early_stopping_rounds=2000)
    oof[val_idx] = clf.predict(train_df.iloc[val_idx][features], num_iteration=clf.best_iteration)
    fold_importance_df = pd.DataFrame()
    fold_importance_df["feature"] = features
    fold_importance_df["importance"] = clf.feature_importance()
    fold_importance_df["fold"] = fold_ + 1
    feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
    predictions += clf.predict(test_df[features], num_iteration=clf.best_iteration) / N_SPLITS
print("CV score: {:<8.5f}".format(roc_auc_score(target, oof)))<save_to_csv>
testing_set_dummy = pd.get_dummies(testing_set[training_set_x], drop_first=True)
Titanic - Machine Learning from Disaster
394,817
sub_df = pd.DataFrame({"ID_code": test_df["ID_code"].values})
sub_df["target"] = predictions
sub_df.to_csv("submission.csv", index=False)<save_to_csv>
ss = MinMaxScaler()
training_set_dummy_ss = ss.fit_transform(training_set_dummy)
testing_set_dummy_ss = ss.fit_transform(testing_set_dummy)
Titanic - Machine Learning from Disaster
394,817
feature_importance_df.to_csv("feature_importance_df.csv", index=False)<set_options>
classifiers = {
    'Gradient Boosting Classifier': GradientBoostingClassifier(),
    'Adaptive Boosting Classifier': AdaBoostClassifier(),
    'RadiusNN': RadiusNeighborsClassifier(radius=40.0),
    'Linear Discriminant Analysis': LinearDiscriminantAnalysis(),
    'GaussianNB': GaussianNB(),
    'BerNB': BernoulliNB(),
    'KNN': KNeighborsClassifier(),
    'Random Forest Classifier': RandomForestClassifier(min_samples_leaf=10, min_samples_split=20, max_depth=4),
    'Decision Tree Classifier': DecisionTreeClassifier(),
    'Logistic Regression': LogisticRegression(),
    "XGBoost": xgb.XGBClassifier(),
}
Titanic - Machine Learning from Disaster
394,817
warnings.filterwarnings('ignore')<set_options>
X_training, X_validating, y_training, y_validating = train_test_split(training_set_dummy, y,
                                                                      test_size=0.20, random_state=11)
Titanic - Machine Learning from Disaster
394,817
def get_logger():
    FORMAT = '[%(levelname)s]%(asctime)s:%(name)s:%(message)s'
    logging.basicConfig(format=FORMAT)
    logger = logging.getLogger('main')
    logger.setLevel(logging.DEBUG)
    return logger

logger = get_logger()<load_from_csv>
base_accuracy = 0
for Name, classify in classifiers.items():
    classify.fit(X_training, y_training)
    y_predictng = classify.predict(X_validating)
    print('Accuracy Score of ' + str(Name) + " : " + str(met.accuracy_score(y_validating, y_predictng)))
    if met.accuracy_score(y_validating, y_predictng) > base_accuracy:
        predictions_test = classify.predict(testing_set_dummy)
        base_accuracy = met.accuracy_score(y_validating, y_predictng)
    else:
        continue
predicted_test_value = pd.DataFrame({'PassengerId': pID, 'Survived': predictions_test})
predicted_test_value.to_csv("PredictedTestScore.csv", index=False)
Titanic - Machine Learning from Disaster
394,817
def read_data(nrows=None):
    logger.info('Input data')
    train_df = pd.read_csv('../input/santander-customer-transaction-prediction/train.csv', nrows=nrows)
    test_df = pd.read_csv('../input/santander-customer-transaction-prediction/test.csv')
    return train_df, test_df<load_pretrained>
cbr = xgb.XGBClassifier()
cbr.fit(X_training, y_training)
predictions_train = cbr.predict(X_validating)
print(met.accuracy_score(y_validating, predictions_train))
Titanic - Machine Learning from Disaster
394,817
def process_data(train_df, test_df):
    logger.info('Features engineering')
    synthetic = np.load('../input/publicprivate/synthetic_samples_indexes.npy')
    synthetic = synthetic - 200000
    synthetic = np.array(synthetic)
    test_df = test_df.iloc[~test_df.index.isin(synthetic)]
    idx = [c for c in train_df.columns if c not in ['ID_code', 'target']]
    traintest = pd.concat([train_df, test_df])
    traintest = traintest.reset_index(drop=True)
    for col in idx:
        varname = col + '_IsUnique'
        traintest[varname] = 0
        _, index_, count_ = np.unique(traintest.loc[:, col].values, return_counts=True, return_index=True)
        traintest[varname][index_[count_ == 1]] += 1
        traintest[varname] = traintest[varname] / (traintest[varname] == 1).sum()
    for col in idx:
        traintest[col + '_freq'] = traintest[col].map(traintest.groupby(col).size())
    for col in idx:
        varname = col + '_IsUnique'
        tmp_col = traintest.loc[traintest[varname] > 0][col]
        traintest[col + '_OnlyUnique'] = tmp_col
        traintest[col + '_OnlyUnique'] = traintest[col + '_OnlyUnique'].fillna(0)
        traintest[col + '_NotUnique'] = traintest[col] - traintest[col + '_OnlyUnique']
        traintest[col + '_NotUnique'] = traintest[col + '_NotUnique'].replace(0, np.nan)
        traintest[col + '_OnlyUnique'] = traintest[col + '_OnlyUnique'].replace(0, np.nan)
        traintest.pop(varname)
    train_df = traintest[:200000]
    test_df = traintest[200000:]
    print('Train and test shape:', train_df.shape, test_df.shape)
    return train_df, test_df<train_model>
clf1 = GradientBoostingClassifier()
clf3 = LinearDiscriminantAnalysis()
clf4 = LogisticRegression()
clf5 = xgb.XGBClassifier()
# Estimator labels now match the models (the original tagged them 'svc'/'gbc'/'lda' against the wrong classifiers).
exTreeClf = VotingClassifier(estimators=[('gbc', clf1), ('lda', clf3), ('lr', clf4), ('xgb', clf5)])
exTreeClf.fit(X_training, y_training)
Titanic - Machine Learning from Disaster
394,817
def run_model(train_df, test_df):
    logger.info('Prepare the model')
    features = [c for c in train_df.columns if c not in ['ID_code', 'target']]
    target = train_df['target']
    logger.info('Run model')
    param = {
        'bagging_freq': 5,
        'bagging_fraction': 0.38,
        'boost_from_average': 'false',
        'boost': 'gbdt',
        'feature_fraction': 0.045,
        'learning_rate': 0.0095,
        'max_depth': -1,
        'metric': 'auc',
        'min_data_in_leaf': 20,
        'min_sum_hessian_in_leaf': 10.0,
        'num_leaves': 3,
        'num_threads': 8,
        'tree_learner': 'serial',
        'objective': 'binary',
        'verbosity': 1,
    }
    num_round = 1000000
    folds = StratifiedKFold(n_splits=5, shuffle=False, random_state=42)
    oof = np.zeros(len(train_df))
    predictions = np.zeros(len(test_df))
    for fold_, (trn_idx, val_idx) in enumerate(folds.split(train_df.values, target.values)):
        print("Fold {}".format(fold_))
        trn_data = lgb.Dataset(train_df.iloc[trn_idx][features], label=target.iloc[trn_idx])
        val_data = lgb.Dataset(train_df.iloc[val_idx][features], label=target.iloc[val_idx])
        clf = lgb.train(param, trn_data, num_round, valid_sets=[trn_data, val_data],
                        verbose_eval=1000, early_stopping_rounds=3500)
        oof[val_idx] = clf.predict(train_df.iloc[val_idx][features], num_iteration=clf.best_iteration)
        predictions += clf.predict(test_df[features], num_iteration=clf.best_iteration) / folds.n_splits
    print("CV score: {:<8.5f}".format(roc_auc_score(target, oof)))
    return predictions<save_to_csv>
Titanic - Machine Learning from Disaster
394,817
def submit(test_df, predictions):
    logger.info('Prepare submission')
    all_test_df = pd.read_csv('../input/santander-customer-transaction-prediction/test.csv')
    sub = pd.DataFrame({"ID_code": all_test_df.ID_code.values})
    sub["target"] = 0
    sub_real = pd.DataFrame({"ID_code": test_df.ID_code.values})
    sub_real["target"] = predictions
    sub = sub.set_index('ID_code')
    sub_real = sub_real.set_index('ID_code')
    sub.update(sub_real)
    sub = sub.reset_index()
    sub.to_csv("submission.csv", index=False)<split>
Titanic - Machine Learning from Disaster
394,817
def main(nrows=None):
    train_df, test_df = read_data(nrows)
    train_df, test_df = process_data(train_df, test_df)
    predictions = run_model(train_df, test_df)
    submit(test_df, predictions)<define_variables>
predicted_test = []
for x in exTreeClf.predict(testing_set_dummy):
    predicted_test.append(x)
predicted_test_value = pd.DataFrame({'PassengerId': pID, 'Survived': predicted_test})
predicted_test_value.to_csv("PredictedTestScore.csv", index=False)
Titanic - Machine Learning from Disaster
394,817
<load_from_csv><EOS>
Titanic - Machine Learning from Disaster
11,126,798
<SOS> metric: categorization accuracy. Kaggle data source: titanic-machine-learning-from-disaster<drop_column>
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', 1000)
Titanic - Machine Learning from Disaster
11,126,798
train_df = train.copy()
test_df = test.copy()
train_df.drop(columns=["ID_code", "target"], inplace=True)
test_df.drop(columns=["ID_code"], inplace=True)
target = train.target<randomize_order>
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
dataset = [train, test]
print('Entries in training set: ', len(train), '\nEntries in testing set: ', len(test))
for df in dataset:
    print(df.isna().sum())
train_test_comb = pd.concat([train, test], axis=0)
Titanic - Machine Learning from Disaster
11,126,798
def augment_train(df_train, y_train):
    t0 = df_train[y_train == 0].copy()
    t1 = df_train[y_train == 1].copy()
    i = 0
    N = 3  # unused in this version
    # Note: range(0) never iterates, so the per-column shuffle below is
    # effectively disabled; the function only duplicates each class once.
    for I in range(0):
        for col in df_train.columns:
            i = i + 1000
            np.random.seed(i)
            np.random.shuffle(t0[col].values)
            np.random.shuffle(t1[col].values)
    df_train = pd.concat([df_train, t0.copy()])
    df_train = pd.concat([df_train, t1.copy()])
    y_train = pd.concat([y_train, pd.Series([0] * t0.shape[0]), pd.Series([1] * t1.shape[0])])
    return df_train, y_train<choose_model_class>
for df in dataset:
    df['Familysize'] = df['SibSp'] + df['Parch']
    df['Title'] = df['Name'].str.split(', ', expand=True)[1].str.split('.', expand=True)[0]
    title_names = (df['Title'].value_counts() > 10)
    df['Title'] = df['Title'].apply(lambda x: x if title_names.loc[x] == True else 'Misc')
train_test_comb = pd.concat([train, test], axis=0)
print(train_test_comb['Title'].value_counts())
Titanic - Machine Learning from Disaster
11,126,798
model = CatBoostClassifier(subsample=0.36,
                           custom_loss='Logloss',
                           random_strength=0,
                           max_depth=3,
                           eval_metric="AUC",
                           learning_rate=0.02,
                           iterations=60000,
                           bootstrap_type='Bernoulli',
                           l2_leaf_reg=0.3,
                           task_type="GPU",
                           random_seed=432013,
                           od_type="Iter",
                           border_count=128)<split>
for df in dataset:
    df['Fare_cat'] = pd.qcut(df['Fare'], q=4, labels=(1, 2, 3, 4))
    df['Age_cat'] = pd.qcut(df['Age'], q=4, labels=(1, 2, 3, 4))
    df['Familysize'] = df['Familysize'].apply(
        lambda x: 'Alone' if x == 0 else ('Small' if 0 < x < 5 else ('Medium' if 5 <= x < 7 else 'Large')))
Titanic - Machine Learning from Disaster
11,126,798
def run_cat(model, trt, tst, tar, n_splits=5, plot=False):
    kf = KFold(n_splits=n_splits, random_state=432013, shuffle=True)
    oof = np.zeros(len(trt))
    feature_importance_df = pd.DataFrame()
    y_valid_pred = 0 * tar
    y_test_pred = 0
    for n_fold, (train_index, valid_index) in enumerate(kf.split(trt, tar)):
        y_train, y_valid = tar.iloc[train_index], tar.iloc[valid_index]
        X_train, X_valid = trt.iloc[train_index, :], trt.iloc[valid_index, :]
        X_train, y_train = augment_train(X_train, y_train)
        X_train, X_valid = generate_fe(trn=X_train, tst=X_valid)
        _train = Pool(X_train, label=y_train)
        _valid = Pool(X_valid, label=y_valid)
        print("Fold ", n_fold)
        fit_model = model.fit(_train,
                              verbose_eval=1000,
                              early_stopping_rounds=1000,
                              eval_set=[_valid],
                              use_best_model=True,
                              plot=False)
        pred = fit_model.predict_proba(X_valid)[:, 1]
        oof[valid_index] = pred
        print("auc = ", roc_auc_score(y_valid, pred))
        y_valid_pred.iloc[valid_index] = pred
        y_test_pred += fit_model.predict_proba(test_fe)[:, 1]
    y_test_pred /= n_splits
    print("average auc:", roc_auc_score(tar, oof))
    return y_test_pred, oof<feature_engineering>
for df in dataset:
    df['Age_cat'] = df['Age_cat'].astype(np.int32)
    df['Fare_cat'] = df['Fare_cat'].astype(np.int32)
    df.Title.replace({'Mr': 1, 'Mrs': 2, 'Miss': 3, 'Master': 4, 'Misc': 5}, inplace=True)
    df.Sex.replace({'female': 0, 'male': 1}, inplace=True)
    df.Embarked.replace({'S': 1, 'C': 2, 'Q': 3}, inplace=True)
Titanic - Machine Learning from Disaster
11,126,798
def generate_fe(trn, tst):
    real, syn = detect_test(test_df[features])
    al = pd.concat([trn, tst, test_df.iloc[real]], axis=0)
    for c in features:
        trn[c + "_test"] = trn[c].map(al[c].value_counts())
        trn[c + "_test"] = trn[c + "_test"] * trn[c]
        tst[c + "_test"] = tst[c].map(al[c].value_counts())
        tst[c + "_test"] = tst[c + "_test"] * tst[c]
    return trn, tst<define_variables>
features = ['Age_cat', 'Fare_cat', 'Pclass', 'Sex', 'Embarked', 'Title', 'Familysize']
encoded_features = []  # fixes the 'encoded_fearures' typo used throughout the original
for df in dataset:
    for feature in features:
        encoded = OneHotEncoder().fit_transform(df[feature].values.reshape(-1, 1)).toarray()
        n = df[feature].nunique()
        cols = [f'{feature}_{n}' for n in range(1, n + 1)]
        encoded_df = pd.DataFrame(encoded, columns=cols)
        encoded_df.index = df.index
        encoded_features.append(encoded_df)
train_one = pd.concat([train, *encoded_features[:7]], axis=1)
test_one = pd.concat([test, *encoded_features[7:]], axis=1)
dataset = [train_one, test_one]
Titanic - Machine Learning from Disaster
11,126,798
features = [c for c in train_df.columns if c not in ["ID_code", "target"]]<count_unique_values>
for df in dataset:
    df.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket',
             'Fare', 'Embarked', 'Familysize', 'Title', 'Fare_cat', 'Age_cat'], axis=1, inplace=True)
Titanic - Machine Learning from Disaster
11,126,798
def detect_test(test_df):
    df_test = test_df.values
    unique_count = np.zeros_like(df_test)
    for feature in tqdm(range(df_test.shape[1])):
        _, index_, count_ = np.unique(df_test[:, feature], return_counts=True, return_index=True)
        unique_count[index_[count_ == 1], feature] += 1
    real_samples_indexes = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]
    synthetic_samples_indexes = np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]
    return real_samples_indexes, synthetic_samples_indexes<feature_engineering>
features = [x for x in train_one.columns if x != 'Survived']
x = train_one[features].to_numpy()
y = train_one['Survived'].to_numpy()
x_train, x_val, y_train, y_val = train_test_split(x, y, train_size=int(0.95 * len(train_one)),
                                                  shuffle=False, random_state=1400)
print(x_train.shape, y_train.shape, x_val.shape, y_val.shape)
Titanic - Machine Learning from Disaster
11,126,798
def generate_fe_test(tst):
    re, sy = detect_test(tst[features])
    al = pd.concat([train_df, test_df.iloc[re]], axis=0)
    for c in features:
        tst[c + "_test"] = tst[c].map(al[c].value_counts())
        tst[c + "_test"] = tst[c + "_test"] * tst[c]
    return tst

test_fe = generate_fe_test(test_df[features])<concatenate>
clf = RandomForestClassifier(criterion='gini', n_estimators=300, max_depth=4, min_samples_split=4,
                             min_samples_leaf=7, max_features='auto', oob_score=True,
                             random_state=1400, n_jobs=-1)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_val)
cm = confusion_matrix(y_val, y_pred)
print(cm)
print(classification_report(y_val, y_pred))
Titanic - Machine Learning from Disaster
11,126,798
y_test_pred, oof = run_cat(model, train_df, test_df, target)<save_to_csv>
test_data = test_one[features].to_numpy()
prediction_clf = clf.predict(test_data)
print(len(prediction_clf))
output = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': prediction_clf})
output.to_csv('/kaggle/working/my_submission.csv', index=False)
Titanic - Machine Learning from Disaster
11,126,798
submission = pd.read_csv(root.joinpath("sample_submission.csv"))
submission['target'] = y_test_pred
pd.Series(oof).to_csv("Cat_oof.csv", index=False)
submission.to_csv('submission_cb_light_0.8999.csv', index=False)<load_from_csv>
seed = 1400
tf.random.set_seed(seed)
my_init = keras.initializers.glorot_uniform(seed=seed)
model = keras.models.Sequential()
model.add(keras.layers.Input(shape=(x_train.shape[1],)))
model.add(keras.layers.Dense(360, activation='selu', kernel_initializer=my_init))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(360, activation='selu', kernel_initializer=my_init))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(360, activation='selu', kernel_initializer=my_init))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam', loss=keras.losses.BinaryCrossentropy(), metrics=['accuracy'])
early_stopping = keras.callbacks.EarlyStopping(monitor='accuracy', patience=3, mode='max',
                                               restore_best_weights=True)
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='accuracy', factor=0.1, patience=3,
                                              mode='max', min_lr=0)
model.fit(x_train, y_train, epochs=50, batch_size=2, callbacks=[reduce_lr, early_stopping], verbose=1)
val_loss, val_acc = model.evaluate(x_val, y_val, verbose=1)
print('\nValidation accuracy:', val_acc)
Titanic - Machine Learning from Disaster
11,126,798
d_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
d_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')<set_options>
target_col = []
test_data = test_one[features].to_numpy()
prediction_nn = model.predict(test_data)
for i in prediction_nn:
    target_col.append(int(round(i[0])))
output = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': target_col})
output.to_csv('my_submission.csv', index=False)
Titanic - Machine Learning from Disaster
10,828,575
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)<set_options>
%matplotlib inline
plt.style.use("ggplot")
warnings.simplefilter("ignore")
Titanic - Machine Learning from Disaster
10,828,575
warnings.filterwarnings('ignore')<count_missing_values>
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_train.head()
Titanic - Machine Learning from Disaster
10,828,575
d_train.isnull().sum()<count_missing_values>
df_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_test.head()
Titanic - Machine Learning from Disaster
10,828,575
d_test.isnull().sum()<sort_values>
df_train['Age'].isna().sum()
Titanic - Machine Learning from Disaster
10,828,575
d_train.corr()['SalePrice'].sort_values(ascending=False)<drop_column>
df_train['Age'] = df_train['Age'].fillna(0)
df_train['Age'].isna().sum()
Titanic - Machine Learning from Disaster
10,828,575
d_train = d_train.drop(d_train[(d_train['SalePrice'] > 740000) & (d_train['SalePrice'] < 756000)].index).reset_index(drop=True)<drop_column>
df_train.drop(columns=['Name', 'Ticket', 'Fare', 'Cabin'], inplace=True)
Titanic - Machine Learning from Disaster
10,828,575
d_train = d_train.drop(d_train[(d_train['1stFlrSF'] > 4690) & (d_train['1stFlrSF'] < 4700)].index).reset_index(drop=True)<drop_column>
cat_vars = ['Sex', 'Embarked']
for var in cat_vars:
    cat_list = 'var' + '_' + var  # dead assignment kept from the original; overwritten below
    cat_list = pd.get_dummies(df_train[var], prefix=var)
    df_train1 = df_train.join(cat_list)
    df_train = df_train1
Titanic - Machine Learning from Disaster
10,828,575
d_train = d_train.drop(d_train[(d_train['GrLivArea'] > 4000) & (d_train['SalePrice'] < 250000)].index).reset_index(drop=True)<drop_column>
df_train.drop(columns=['Sex', 'Embarked'], inplace=True)
df_train.head(5)
Titanic - Machine Learning from Disaster
10,828,575
d_train = d_train.drop(d_train[(d_train['GarageCars'] > 3) & (d_train['SalePrice'] < 290000)].index).reset_index(drop=True)<drop_column>
s = 0
d = 0
for i in df_train['Survived']:
    if i == 0:
        d += 1
    else:
        s += 1
print('Dead - ', d, '\nSurvived -', s)
Titanic - Machine Learning from Disaster
10,828,575
d_train = d_train.drop(d_train[(d_train['GarageArea'] > 1240) & (d_train['GarageArea'] < 1400)].index).reset_index(drop=True)<compute_train_metric>
X = df_train.loc[:, df_train.columns != 'Survived']
Y = df_train.Survived
Titanic - Machine Learning from Disaster
10,828,575
from scipy.stats import ttest_ind  # Welch's two-sample t-test used below

def Series_stats(var, category, prop1, prop2):
    s1 = d_train[d_train[category] == prop1][var]
    s2 = d_train[d_train[category] == prop2][var]
    t, p = ttest_ind(s1, s2, equal_var=False)
    print("Two-sample t-test: t={}, p={}".format(round(t, 5), p))
    if (p < 0.05) and (np.abs(t) > 1.96):
        print("REJECT the Null Hypothesis and state that: at 5% significance level, "
              "the mean {} of {}-{} and {}-{} are not equal.".format(var, prop1, category, prop2, category))
        print("YES, the {} of {}-{} differs significantly from {}-{} in the current dataset."
              .format(var, prop1, category, prop2, category))
    else:
        print("FAIL to Reject the Null Hypothesis and state that: at 5% significance level, "
              "the mean {} of {}-{} and {}-{} are equal.".format(var, prop1, category, prop2, category))
        print("NO, the {} of {}-{} does NOT differ significantly from {}-{} in the current dataset."
              .format(var, prop1, category, prop2, category))
    print("The mean value of {} for {}-{} is {} and for {}-{} is {}"
          .format(var, prop1, category, round(s1.mean(), 2), prop2, category, round(s2.mean(), 2)))<compute_train_metric>
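A hypothetical invocation of the helper above (it assumes d_train is the House Prices frame loaded earlier; 'CentralAir' with levels 'Y'/'N' is a column in that dataset):

# Compare mean SalePrice between houses with and without central air.
Series_stats('SalePrice', 'CentralAir', 'Y', 'N')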
df_test['Age'] = df_test['Age'].fillna(0) df_test.info()
d_train['Exterior1st'].fillna("VinylSd", inplace=True) d_train['Exterior2nd'].fillna("VinylSd", inplace=True) d_train['KitchenQual'].fillna("TA", inplace=True) d_train['Functional'].fillna("Typ", inplace=True) d_train['SaleType'].fillna("WD", inplace=True) d_test['MSZoning'].fillna('RL', inplace=True) d_test['Utilities'].fillna('NoSeWa', inplace=True) d_test['Exterior1st'].fillna("VinylSd", inplace=True) d_test['Exterior2nd'].fillna("VinylSd", inplace=True) d_test['KitchenQual'].fillna("TA", inplace=True) d_test['Functional'].fillna("Typ", inplace=True) d_test['SaleType'].fillna("WD", inplace=True) d_test['Electrical'].fillna("SBrkr") d_train = d_train.fillna("None") d_test = d_test.fillna("None" )<count_missing_values>
cat_vars = ['Sex', 'Embarked']
for var in cat_vars:
    # mirror the training-set encoding on the test set
    cat_list = pd.get_dummies(df_test[var], prefix=var)
    df_test = df_test.join(cat_list)
d_train.isnull().sum()<count_missing_values>
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import export_graphviz
from IPython.display import SVG, display, Image
from graphviz import Source
from ipywidgets import interactive, IntSlider, FloatSlider, interact
import ipywidgets
from subprocess import call
import matplotlib.image as mpimg
<drop_column><EOS>
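plot_tree_rf, called in the next cell, is defined in a cell not shown here. A plausible reconstruction inferred only from that call; every parameter mapping below is an assumption:

from sklearn.ensemble import RandomForestClassifier

def plot_tree_rf(crit='gini', bootstrap=True, depth=None, forests=100, min_split=2, min_leaf=1):
    # hypothetical helper: fit a random forest with the given hyper-parameters and return it
    # (the original likely also rendered one of the trees, hence the name)
    estimator = RandomForestClassifier(criterion=crit, bootstrap=bootstrap, max_depth=depth,
                                       n_estimators=forests, min_samples_split=min_split,
                                       min_samples_leaf=min_leaf, random_state=0)
    estimator.fit(X, Y)  # X, Y from the earlier cell are assumed to be in scope
    return estimator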
estimator = plot_tree_rf(crit='gini', bootstrap=False, depth=16, forests=100, min_split=3, min_leaf=3)  # bootstrap was the string 'False', which is truthy; pass the boolean
y_pred_rf = estimator.predict(df_test)  # assumes df_test was reduced to the model's feature columns in an elided cell
print('len', len(y_pred_rf))
sub = pd.DataFrame(columns=['PassengerId', 'Survived'])
sub['PassengerId'] = df_test['PassengerId'].astype(int)
sub['Survived'] = y_pred_rf.astype(int)
sub.to_csv('sub_rf.csv', index=False)
Titanic - Machine Learning from Disaster (kernel 9,877,320)
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<categorify>
%matplotlib inline
sns.set()
warnings.filterwarnings("ignore")
from collections import defaultdict
from sklearn.preprocessing import LabelEncoder

cols = np.array(d_train.columns[d_train.dtypes != object])
d = defaultdict(LabelEncoder)  # one encoder per column, created on first access
train = train.apply(lambda x: d[x.name].fit_transform(x))
test = test.apply(lambda x: d[x.name].transform(x))
# restore the original numeric columns; only the object columns needed encoding
train[cols] = d_train[cols]
test[np.delete(cols, len(cols) - 1)] = d_test[np.delete(cols, len(cols) - 1)]  # SalePrice is absent from test<feature_engineering>
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train["O_Style"] = train["Condition1"] + train["Condition2"] + train["BldgType"] + train["HouseStyle"]+ train["RoofStyle"] + train["MasVnrType"] train["O_Neighbor"] = train["Street"] + train["LotShape"] + train["Neighborhood"] * train["Condition1"] train["O_Street"] = train["Street"] * train["LotShape"] + train["LandContour"] * train["LotConfig"] train["SqFtPerRoom"] = train["GrLivArea"] /(train["TotRmsAbvGrd"] + train["FullBath"] +train["HalfBath"] + train["KitchenAbvGr"]) train["HighQualSF"] = train["1stFlrSF"] + train["2ndFlrSF"] + train["TotalBsmtSF"] train["Years_Years"] = train["YrSold"] - train["YearRemodAdd"] train['Total_Home_Quality'] = train['OverallQual'] + train['OverallCond'] train['Total_Bathrooms'] =(train['FullBath'] +(0.5 * train['HalfBath'])+ train['BsmtFullBath'] +(0.5 * train['BsmtHalfBath'])) test["O_Style"] = test["Condition1"] + test["Condition2"] + test["BldgType"] + test["HouseStyle"]+ test["RoofStyle"] + test["MasVnrType"] test["O_Neighbor"] = test["Street"] + test["LotShape"] + test["Neighborhood"] * test["Condition1"] test["O_Street"] = test["Street"] * test["LotShape"] + test["LandContour"] * test["LotConfig"] test["SqFtPerRoom"] = test["GrLivArea"] /(test["TotRmsAbvGrd"] + test["FullBath"] +test["HalfBath"] + test["KitchenAbvGr"]) test["HighQualSF"] = test["1stFlrSF"] + test["2ndFlrSF"] + test["TotalBsmtSF"] test["Years_Years"] = test["YrSold"] - test["YearRemodAdd"] test['Total_Home_Quality'] = test['OverallQual'] + test['OverallCond'] test['Total_Bathrooms'] =(test['FullBath'] +(0.5 * test['HalfBath'])+ test['BsmtFullBath'] +(0.5 * test['BsmtHalfBath']))<drop_column>
train.isnull().sum()
train = train.drop(['Id'], axis=1)
test = test.drop(['Id'], axis=1)<import_modules>
test.isnull().sum()
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor, AdaBoostRegressor, BaggingRegressor, RandomForestRegressor
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from xgboost.sklearn import XGBRegressor
from catboost import CatBoostRegressor
import xgboost as xgb
import lightgbm as lgb<split>
X = train.drop(columns=['SalePrice']).values
y = np.log1p(train["SalePrice"])  # log-transform the target; predictions are inverted with expm1 later
Z = test.values
X_train, X_test1, y_train, y_test1 = train_test_split(X, y, test_size=0.0001, random_state=42)  # keeps virtually all rows for training
X_train1, X_test, y_train1, y_test = train_test_split(X, y, test_size=0.5, random_state=42)<train_model>
survived = train[train['Survived'] == 1]
not_survived = train[train['Survived'] == 0]
print("Survived: %i (%.1f%%)" % (len(survived), float(len(survived)) / len(train) * 100.0))
print("Not Survived: %i (%.1f%%)" % (len(not_survived), float(len(not_survived)) / len(train) * 100.0))
print("Total: %i" % len(train))
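value_counts(normalize=True) yields the same percentages directly; a minimal sketch:

print(train['Survived'].value_counts(normalize=True) * 100)  # % for 0 (not survived) and 1 (survived)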
Acc = pd.DataFrame(index=None, columns=['model', 'Root Mean Squared Error', 'Accuracy on Training set', 'Accuracy on Testing set'])
regressors = [['DecisionTreeRegressor', DecisionTreeRegressor()],
              ['XGBRegressor', XGBRegressor()],
              ['CatBoostRegressor', CatBoostRegressor(verbose=False)],
              ['LGBMRegressor', lgb.LGBMRegressor()],
              ['GradientBoostingRegressor', GradientBoostingRegressor()],
              ['ExtraTreesRegressor', ExtraTreesRegressor()]]
for mod in regressors:
    name = mod[0]
    model = mod[1]
    model.fit(X_train1, y_train1)
    ATrS = model.score(X_train1, y_train1)
    ATeS = model.score(X_test, y_test)
    RMSE = np.sqrt(mean_squared_error(y_test, model.predict(X_test)))  # take the root so the column label is accurate
    Acc = Acc.append(pd.Series({'model': name, 'Root Mean Squared Error': RMSE, 'Accuracy on Training set': ATrS, 'Accuracy on Testing set': ATeS}), ignore_index=True)
Acc.sort_values(by='Root Mean Squared Error')<train_model>
train.Pclass.value_counts()
def modelFitter(maxDepth, num_leaves, learning_rate, bagging_freq):  # bagging_freq replaces the unused n_estimators so the signature matches pbounds below
    model = lgb.LGBMRegressor(learning_rate=learning_rate,
                              num_leaves=int(num_leaves),
                              max_depth=int(maxDepth),
                              bagging_freq=int(bagging_freq))
    evalSet = [(X_test, y_test)]
    model.fit(X_train, y_train, eval_metric="rmse", eval_set=evalSet,
              early_stopping_rounds=50, verbose=False)
    bestScore = model.best_score_[list(model.best_score_.keys())[0]]['rmse']
    return -bestScore  # negated so a maximizer minimizes RMSE

pbounds = {'maxDepth': (1, 5), 'num_leaves': (10, 35), 'learning_rate': (0.005, 0.01), 'bagging_freq': (4, 6)}<compute_train_metric>
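The optimizer that consumes modelFitter and pbounds is not shown; assuming the bayes_opt package was used, the wiring would look roughly like this (init_points and n_iter are illustrative, not from the source):

from bayes_opt import BayesianOptimization

optimizer = BayesianOptimization(f=modelFitter, pbounds=pbounds, random_state=42)
optimizer.maximize(init_points=5, n_iter=25)  # illustrative budget
print(optimizer.max)  # best (negated) RMSE and the parameters that achieved it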
pclass_survived = train.groupby('Pclass').Survived.value_counts()
pclass_survived
LGBMR = lgb.LGBMRegressor(objective='regression', num_leaves=6, learning_rate=0.02, n_estimators=2000, max_bin=100,
                          bagging_fraction=0.8, bagging_freq=4, bagging_seed=8, feature_fraction=0.2, feature_fraction_seed=8,
                          min_sum_hessian_in_leaf=11, verbose=-1, random_state=42)
LGBMR.fit(X_train, y_train)
ATrS = LGBMR.score(X_train, y_train)
ATeS = LGBMR.score(X_test, y_test)
RMSE = np.sqrt(mean_squared_error(y_test, LGBMR.predict(X_test)))
print("Root Mean Squared Error: {}, Accuracy Train set: {}, Accuracy Test set: {}".format(RMSE, ATrS, ATeS))<choose_model_class>
pclass_survived_average = train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False ).mean() pclass_survived_average
gbr = GradientBoostingRegressor()
params = {'loss': ['huber'],
          'learning_rate': [0.012, 0.015, 0.02],
          'max_depth': [3, 4, 5],
          'min_samples_leaf': [10, 12, 15],
          'min_samples_split': [3, 5, 7]}<train_model>
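The search that consumes gbr and params is elided; a minimal sketch of the likely scikit-learn call (cv, scoring and n_jobs values are assumptions):

from sklearn.model_selection import GridSearchCV

gsearch = GridSearchCV(estimator=gbr, param_grid=params, scoring='neg_mean_squared_error', cv=3, n_jobs=-1)
gsearch.fit(X_train, y_train)
print(gsearch.best_params_)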
train.Sex.value_counts()
GBR = GradientBoostingRegressor(n_estimators=1000, learning_rate=0.012, max_depth=6, max_features='sqrt',
                                min_samples_leaf=10, min_samples_split=4, loss='huber', random_state=12)
GBR.fit(X_train, y_train)
ATrS = GBR.score(X_train, y_train)
ATeS = GBR.score(X_test, y_test)
RMSE = np.sqrt(mean_squared_error(y_test, GBR.predict(X_test)))
print("Root Mean Squared Error: {}, Accuracy Train set: {}, Accuracy Test set: {}".format(RMSE, ATrS, ATeS))<train_on_grid>
sex_survival = train.groupby('Sex').Survived.value_counts()
sex_survival
train_data = X_train
train_labels = y_train
model = CatBoostRegressor()
grid = {'iterations': [4000, 7500],
        'learning_rate': [0.003, 0.005, 0.007],
        'depth': [3, 7, 10],
        'l2_leaf_reg': [1, 2],
        'random_seed': [12]}<compute_train_metric>
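The search step itself is elided; CatBoost ships a built-in grid_search method, so the call likely resembled this sketch (the cv choice is an assumption):

search_result = model.grid_search(grid, X=train_data, y=train_labels, cv=3, verbose=False)
print(search_result['params'])  # best parameter combination found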
sex_survived_average = train[['Sex', 'Survived']].groupby(['Sex'], as_index=False ).mean() sex_survived_average
params = {'iterations': 5500, 'learning_rate': 0.005, 'loss_function': 'RMSE', 'depth': 8,
          'l2_leaf_reg': 1, 'eval_metric': 'RMSE', 'verbose': False, 'random_seed': 12}
CBR = CatBoostRegressor(**params)
CBR.fit(X_train, y_train)
ATrS = CBR.score(X_train, y_train)
ATeS = CBR.score(X_test, y_test)
RMSE = np.sqrt(mean_squared_error(y_test, CBR.predict(X_test)))
print("Root Mean Squared Error: {}, Accuracy Train set: {}, Accuracy Test set: {}".format(RMSE, ATrS, ATeS))<compute_train_metric>
train.Embarked.value_counts()
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test)

def xgb_evaluate(learning_rate, max_depth, gamma, subsample, colsample_bytree, reg_alpha):
    params = {'learning_rate': learning_rate, 'max_depth': int(max_depth), 'gamma': gamma,
              'subsample': subsample, 'colsample_bytree': colsample_bytree, 'reg_alpha': reg_alpha}
    cv_result = xgb.cv(params, dtrain, num_boost_round=100, nfold=3)
    return -1.0 * cv_result['test-rmse-mean'].iloc[-1]  # negated mean CV RMSE, for a maximizer<compute_train_metric>
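As with the LightGBM tuner above, the bounds and optimizer call for xgb_evaluate are not shown; a hedged sketch, with every bound value illustrative:

from bayes_opt import BayesianOptimization

xgb_bounds = {'learning_rate': (0.01, 0.1), 'max_depth': (3, 7), 'gamma': (0, 1),
              'subsample': (0.5, 1.0), 'colsample_bytree': (0.3, 0.9), 'reg_alpha': (0, 1)}  # illustrative ranges
xgb_bo = BayesianOptimization(f=xgb_evaluate, pbounds=xgb_bounds, random_state=42)
xgb_bo.maximize(init_points=5, n_iter=25)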
train.groupby('Embarked' ).Survived.value_counts()
XGBR = xgb.XGBRegressor(colsample_bytree=0.5149443835418306, gamma=0.0, learning_rate=0.03, max_depth=4,
                        reg_alpha=0.005412343201815549, subsample=0.7119381043613345, n_estimators=1200,
                        min_child_weight=0, nthread=-1, scale_pos_weight=1, seed=27, random_state=42)
XGBR.fit(X_train, y_train)
ATrS = XGBR.score(X_train, y_train)
ATeS = XGBR.score(X_test, y_test)
RMSE = np.sqrt(mean_squared_error(y_test, XGBR.predict(X_test)))
print("Root Mean Squared Error: {}, Accuracy Train set: {}, Accuracy Test set: {}".format(RMSE, ATrS, ATeS))<save_to_csv>
train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False ).mean()
# average the four models' predictions; expm1 inverts the log1p applied to the target
result = (np.expm1(GBR.predict(Z)) + np.expm1(LGBMR.predict(Z)) + np.expm1(CBR.predict(Z)) + np.expm1(XGBR.predict(Z))) / 4
sub = pd.DataFrame({'Id': d_test.Id, 'SalePrice': result})
sub.to_csv('submission.csv', index=False)
sub.head(2)<load_from_csv>
train.Parch.value_counts()
warnings.filterwarnings(action="ignore") target_name = 'SalePrice' dataset_train_raw = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/train.csv') dataset_test = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/test.csv') dataset_train_raw<prepare_x_and_y>
train.groupby('Parch' ).Survived.value_counts()
ignore_feature = ['Id']
y_train = dataset_train_raw[target_name]
dataset_train = dataset_train_raw.drop([target_name] + ignore_feature, axis=1, inplace=False)
dataset_test.drop(ignore_feature, axis=1, inplace=True)
all_data = pd.concat([dataset_train, dataset_test], axis=0, sort=False)
all_data<define_variables>
train[['Parch', 'Survived']].groupby(['Parch'], as_index=False ).mean()
specially_missed = ['Alley', 'PoolQC', 'MiscFeature', 'Fence', 'FireplaceQu', 'GarageType', 'GarageFinish',
                    'GarageQual', 'GarageCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1',
                    'BsmtFinType2', 'MasVnrType']
for feature in specially_missed:
    all_data[feature] = all_data[feature].fillna('None')<define_variables>
train.SibSp.value_counts()
numeric_missed = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath',
                  'GarageYrBlt', 'GarageArea', 'GarageCars', 'MasVnrArea']
for feature in numeric_missed:
    all_data[feature] = all_data[feature].fillna(0)<data_type_conversions>
train.groupby('SibSp' ).Survived.value_counts()
all_data['MSSubClass'] = all_data['MSSubClass'].astype(str)
all_data['YrSold'] = all_data['YrSold'].astype(str)
all_data['MoSold'] = all_data['MoSold'].astype(str)<categorify>
train[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False ).mean()
all_data['Functional'] = all_data['Functional'].fillna('Typ')
all_data['Utilities'] = all_data['Utilities'].fillna('AllPub')
all_data['KitchenQual'] = all_data['KitchenQual'].fillna('TA')
all_data['Electrical'] = all_data['Electrical'].fillna('SBrkr')
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0])
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])
all_data['LotFrontage'] = all_data.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.mean()))
all_data['MSZoning'] = all_data.groupby('MSSubClass')['MSZoning'].transform(lambda x: x.fillna(x.mode()[0]))<sort_values>
total_survived = train[train['Survived'] == 1]
total_not_survived = train[train['Survived'] == 0]
male_survived = train[(train['Survived'] == 1) & (train['Sex'] == "male")]
female_survived = train[(train['Survived'] == 1) & (train['Sex'] == "female")]
male_not_survived = train[(train['Survived'] == 0) & (train['Sex'] == "male")]
female_not_survived = train[(train['Survived'] == 0) & (train['Sex'] == "female")]
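These subsets are the usual inputs to survival plots; the figure cells are not shown, so here is one minimal visualization sketch:

import matplotlib.pyplot as plt
import seaborn as sns

sns.countplot(x='Sex', hue='Survived', data=train)  # survival split by sex
plt.show()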
from scipy.stats import skew  # needed for the skewness computation below

numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x)).sort_values(ascending=False)
high_skew = skewed_feats[abs(skewed_feats) > 0.5]
high_skew<feature_engineering>
train_test_data = [train, test]
for dataset in train_test_data:
    dataset['Title'] = dataset['Name'].str.extract(r'([A-Za-z]+)\.', expand=False)  # raw string; expand=False returns a Series
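The pattern captures the letters immediately before a period, i.e. the honorific; a quick illustration:

pd.Series(['Braund, Mr. Owen Harris']).str.extract(r'([A-Za-z]+)\.', expand=False)  # -> 'Mr'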
for feature in high_skew.index:
    all_data[feature] = np.log1p(all_data[feature])<feature_engineering>
for dataset in train_test_data:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col',
                                                 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Other')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']
all_data['SqFtPerRoom'] = all_data['GrLivArea'] / (all_data['TotRmsAbvGrd'] + all_data['FullBath'] + all_data['HalfBath'] + all_data['KitchenAbvGr'])
all_data['TotalHomeQuality'] = all_data['OverallQual'] + all_data['OverallCond']
all_data['TotalBathrooms'] = all_data['FullBath'] + (0.5 * all_data['HalfBath']) + all_data['BsmtFullBath'] + (0.5 * all_data['BsmtHalfBath'])<categorify>
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Other": 5} for dataset in train_test_data: dataset['Title'] = dataset['Title'].map(title_mapping) dataset['Title'] = dataset['Title'].fillna(0 )