kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
1,414,913 | df['area'] = [float(a) for a in [i.replace(',', '') for i in list(df['area'].values)]]<feature_engineering> | gc.enable()
folds = KFold(n_splits=4, shuffle=True, random_state=546789)
oof_preds = np.zeros(data.shape[0])
sub_preds = np.zeros(test.shape[0])
feature_importance_df = pd.DataFrame()
feats = [f for f in data.columns if f not in ['SK_ID_CURR']]
for n_fold, (trn_idx, val_idx) in enumerate(folds.split(data)):
trn_x, trn_y = data[feats].iloc[trn_idx], y.iloc[trn_idx]
val_x, val_y = data[feats].iloc[val_idx], y.iloc[val_idx]
clf = LGBMClassifier(
n_estimators=10000,
learning_rate=0.03,
num_leaves=34,
colsample_bytree=0.9,
subsample=0.8,
max_depth=8,
reg_alpha=.1,
reg_lambda=.1,
min_split_gain=.01,
min_child_weight=300,
silent=-1,
verbose=-1,
)
clf.fit(trn_x, trn_y,
eval_set= [(trn_x, trn_y),(val_x, val_y)],
eval_metric='auc', verbose=100, early_stopping_rounds=100
)
oof_preds[val_idx] = clf.predict_proba(val_x, num_iteration=clf.best_iteration_)[:, 1]
sub_preds += clf.predict_proba(test[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
print('Fold %2d AUC : %.6f' %(n_fold + 1, roc_auc_score(val_y, oof_preds[val_idx])))
del clf, trn_x, trn_y, val_x, val_y
gc.collect()
print('Full AUC score %.6f' % roc_auc_score(y, oof_preds))
test['TARGET'] = sub_preds
test[['SK_ID_CURR', 'TARGET']].to_csv('submission1LGBM1.csv', index=False)
| Home Credit Default Risk |
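The fold loop above leans on several names (`gc`, `np`, `KFold`, `LGBMClassifier`, `roc_auc_score`) whose imports are not part of this excerpt. A minimal, assumed preamble that would make the snippet self-contained:

```python
# Assumed imports for the cross-validation snippet above; the kernel's own
# import cell is not shown in this row.
import gc
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score
from lightgbm import LGBMClassifier
```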
1,537,543 | df['densidade_dem'] = [float(i) for i in [i.replace(',', '') for i in [str(i) for i in df['densidade_dem']]]]<feature_engineering> | M1 = pd.read_csv('../input/gogomaster/Think1.csv')
M2 = pd.read_csv('../input/gogomaster/11111.csv')
M3 = pd.read_csv('../input/gogomaster/22222.csv')
M4 = pd.read_csv('../input/gogomaster/33333.csv')
M5 = pd.read_csv('../input/gogomaster/44444.csv')
M6 = pd.read_csv('../input/gogomaster/55555.csv') | Home Credit Default Risk |
1,537,543 | test['area'] = [float(a) for a in [i.replace(',', '') for i in list(test['area'].values)]]<feature_engineering> | from functools import reduce
def merge_dataframes(dfs, merge_keys):
dfs_merged = reduce(lambda left, right: pd.merge(left, right, on=merge_keys), dfs)
return dfs_merged | Home Credit Default Risk |
1,537,543 | test['densidade_dem']=[float(i)for i in [i.replace(',','')for i in [str(i)for i in test['densidade_dem']]]]<count_missing_values> | dfs = [M1,M2,M3,M4,M5,M6]
merge_keys=['SK_ID_CURR']
df = merge_dataframes(dfs, merge_keys=merge_keys) | Home Credit Default Risk |
1,537,543 | test['ranking_igm'].isnull().sum(), sum(test['ranking_igm'].isnull() == False)<define_variables> | df.columns = ['SK_ID_CURR','T1','T2','T3','T4','T5','T6']
df.head() | Home Credit Default Risk |
1,537,543 | feats=[a for a in df.columns if a not in ['nota_mat']+d]<data_type_conversions> | pred_prob = 0.7 * df['T3'] + 0.3 * df['T1']
pred_prob.head() | Home Credit Default Risk |
1,537,543 | for i in df[feats].columns:
if df[i].isnull().sum() >0:
if df[i].dtypes!='object':
df[i]=df[i].fillna(df[i].mean())
else:
continue
else:
continue<count_missing_values> | sub = pd.DataFrame()
sub['SK_ID_CURR'] = df['SK_ID_CURR']
sub['target']= pred_prob | Home Credit Default Risk |
1,537,543 | df[feats].isnull().sum()<data_type_conversions> | sub.to_csv('ldit.csv', index=False ) | Home Credit Default Risk |
1,537,543 | for i in test[feats].columns:
if test[i].isnull().sum() >0:
if test[i].dtypes!='object':
test[i]=test[i].fillna(test[i].mean())
else:
continue
else:
continue<count_missing_values> | B_prob = 0.6 * df['T3'] + 0.2 * df['T1'] + 0.2 * df['T2'] | Home Credit Default Risk |
1,537,543 | df.loc[:,feats].isnull().sum()<feature_engineering> | SUB = pd.DataFrame()
SUB['SK_ID_CURR'] = df['SK_ID_CURR']
SUB['TARGET'] = B_prob
SUB.to_csv('Blendss.csv', index=False ) | Home Credit Default Risk |
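The two hand-tuned blends above (0.7/0.3 and 0.6/0.2/0.2) are weighted averages of submission probabilities. A small, hypothetical helper that generalises the pattern and guards against weights that do not sum to 1 (column names and weights here are illustrative, not from the kernel):

```python
def blend(frame, weights):
    """Weighted average of prediction columns; weights must sum to 1."""
    assert abs(sum(weights.values()) - 1.0) < 1e-9, "weights must sum to 1"
    return sum(w * frame[col] for col, w in weights.items())

# Reproduces the first blend above:
# pred_prob = blend(df, {'T3': 0.7, 'T1': 0.3})
```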
1,537,543 | for i in range(0, len(df.columns)):
df[df.columns[i]] = np.log(df[df.columns[i]] + 1)<feature_engineering> | corr_pred = 0.6 * df['T2'] + 0.05 * df['T3'] + 0.05 * df['T4'] + 0.1 * df['T5'] + 0.2 * df['T1']
corr_pred.head() | Home Credit Default Risk |
1,537,543 | <import_modules><EOS> | SuB = pd.DataFrame()
SuB['SK_ID_CURR'] = df['SK_ID_CURR']
SuB['TARGET'] = corr_pred
SuB.to_csv('corr_blend.csv', index=False ) | Home Credit Default Risk |
6,017,848 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<split> | import numpy as np
import pandas as pd
import os | Home Credit Default Risk |
6,017,848 | train, valid = train_test_split(df, random_state=42 )<split> | app_train = pd.read_csv('.. /input/home-credit-simple-featuers/simple_features_train.csv')
app_test = pd.read_csv('.. /input/home-credit-simple-featuers/simple_features_test.csv' ) | Home Credit Default Risk |
6,017,848 | train, valid = train_test_split(df, random_state=42 )<define_search_space> | sk_id = pd.read_csv('.. /input/home-credit-default-risk/application_test.csv' ) | Home Credit Default Risk |
6,017,848 |
<train_on_grid> | train = app_train.drop(columns = ['TARGET'])
train_labels = app_train['TARGET'] | Home Credit Default Risk |
6,017,848 |
<define_search_space> | X_train, X_test, y_train, y_test = train_test_split(
train, train_labels, test_size=0.2 ) | Home Credit Default Risk |
6,017,848 | param_grid = {
'bootstrap': [True],
'max_depth': [80, 90, 100, 110],
'max_features': [2, 3],
'min_samples_leaf': [3, 4, 5],
'min_samples_split': [8, 10, 12],
'n_estimators': [100, 200, 300, 1000]
}
rf = RandomForestRegressor()
grid_search = GridSearchCV(estimator = rf, param_grid = param_grid,
cv = 3, n_jobs = -1, verbose = 2)<train_on_grid> | clf = LGBMClassifier(nthread=4, n_estimators=10000, learning_rate=0.02, num_leaves=34, colsample_bytree=0.9497036, subsample=0.8715623, max_depth=8, reg_alpha=0.041545473, reg_lambda=0.0735294, min_split_gain=0.0222415, min_child_weight=39.3259775, silent=-1, verbose=-1)
clf.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)],
eval_metric='auc', verbose=100, early_stopping_rounds=200)
| Home Credit Default Risk |
6,017,848 | grid_search.fit(train[feats], train['nota_mat'])<find_best_params> | | Home Credit Default Risk |
6,017,848 | <import_modules><EOS> | y_pred = clf.predict_proba(app_test, num_iteration=clf.best_iteration_)[:, 1]
submit = sk_id[['SK_ID_CURR']].copy()  # copy() avoids pandas' SettingWithCopyWarning
submit['TARGET'] = y_pred
submit.to_csv('sub1.csv', index = False)
| Home Credit Default Risk |
1,086,270 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<import_modules> | warnings.simplefilter(action='ignore', category=FutureWarning ) | Home Credit Default Risk |
1,086,270 | from sklearn.ensemble import RandomForestRegressor<choose_model_class> | def one_hot_encoder(df, nan_as_category = True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns= categorical_columns, dummy_na= nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns | Home Credit Default Risk |
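`one_hot_encoder` returns both the dummified frame and the list of columns it created; the aggregation helpers below rely on that list to add per-category mean aggregations. A usage sketch on a toy frame (the data is illustrative):

```python
import pandas as pd

toy = pd.DataFrame({'SK_ID_CURR': [1, 2, 3],
                    'NAME_CONTRACT_TYPE': ['Cash loans', 'Revolving loans', None]})
toy_encoded, new_cols = one_hot_encoder(toy, nan_as_category=True)
# new_cols contains one dummy column per category plus a *_nan column,
# e.g. 'NAME_CONTRACT_TYPE_Cash loans', ..., 'NAME_CONTRACT_TYPE_nan'.
```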
1,086,270 | rf = RandomForestRegressor(random_state=42, n_jobs=-1, min_samples_leaf=3,max_features=3,min_samples_split=8,bootstrap=True,max_depth=90,
n_estimators=200 )<train_model> | def application_train_test(num_rows = None, nan_as_category = True):
df = pd.read_csv('../input/application_train.csv', nrows=num_rows)
test_df = pd.read_csv('../input/application_test.csv', nrows=num_rows)
print("Train samples: {}, test samples: {}".format(len(df), len(test_df)))
df = df.append(test_df).reset_index()
for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
df[bin_feature], uniques = pd.factorize(df[bin_feature])
df, cat_cols = one_hot_encoder(df, nan_as_category)
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)
df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']
df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
del test_df
gc.collect()
return df | Home Credit Default Risk |
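`application_train_test` stacks train and test with `DataFrame.append`, which is deprecated in recent pandas releases. A behaviour-preserving equivalent with `pd.concat`, should the kernel be run on a modern pandas (a sketch, not the kernel's own code):

```python
# Equivalent of df.append(test_df).reset_index() on pandas >= 2.0,
# where DataFrame.append has been removed.
df = pd.concat([df, test_df]).reset_index()
```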
1,086,270 | rf.fit(train[feats], train['nota_mat'] )<import_modules> | def bureau_and_balance(num_rows = None, nan_as_category = True):
bureau = pd.read_csv('../input/bureau.csv', nrows=num_rows)
bb = pd.read_csv('../input/bureau_balance.csv', nrows=num_rows)
bb, bb_cat = one_hot_encoder(bb, nan_as_category)
bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
for col in bb_cat:
bb_aggregations[col] = ['mean']
bb_agg = bb.groupby('SK_ID_BUREAU' ).agg(bb_aggregations)
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist() ])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(columns= 'SK_ID_BUREAU', inplace= True)
del bb, bb_agg
gc.collect()
num_aggregations = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'CREDIT_DAY_OVERDUE': ['max', 'mean'],
'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'DAYS_CREDIT_UPDATE': ['min', 'max', 'mean'],
'AMT_ANNUITY': ['max', 'mean'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum']
}
cat_aggregations = {}
for cat in bureau_cat: cat_aggregations[cat] = ['mean']
for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist() ])
active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR' ).agg(num_aggregations)
active_agg.columns = pd.Index(['ACT_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
del active, active_agg
gc.collect()
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR' ).agg(num_aggregations)
closed_agg.columns = pd.Index(['CLS_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
del closed, closed_agg, bureau
gc.collect()
return bureau_agg
| Home Credit Default Risk |
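Both `agg` calls in `bureau_and_balance` return frames whose columns form a two-level `MultiIndex` (feature, statistic); the `pd.Index([... e[0] + "_" + e[1].upper() ...])` idiom flattens that into single prefixed names. A minimal sketch of the same flattening on toy data:

```python
import pandas as pd

toy = pd.DataFrame({'SK_ID_CURR': [1, 1, 2], 'AMT_CREDIT_SUM': [10.0, 20.0, 5.0]})
agg = toy.groupby('SK_ID_CURR').agg({'AMT_CREDIT_SUM': ['max', 'mean']})
agg.columns = pd.Index(['BURO_' + e[0] + '_' + e[1].upper() for e in agg.columns.tolist()])
print(agg.columns.tolist())  # ['BURO_AMT_CREDIT_SUM_MAX', 'BURO_AMT_CREDIT_SUM_MEAN']
```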
1,086,270 | from sklearn.metrics import mean_squared_error<import_modules> | def previous_applications(num_rows = None, nan_as_category = True):
prev = pd.read_csv('../input/previous_application.csv', nrows=num_rows)
prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
num_aggregations = {
'AMT_ANNUITY': ['min', 'max', 'mean'],
'AMT_APPLICATION': ['min', 'max', 'mean'],
'AMT_CREDIT': ['min', 'max', 'mean'],
'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],
'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],
'AMT_GOODS_PRICE': ['min', 'max', 'mean'],
'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
}
cat_aggregations = {}
for cat in cat_cols:
cat_aggregations[cat] = ['mean']
prev_agg = prev.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist() ])
approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
approved_agg = approved.groupby('SK_ID_CURR' ).agg(num_aggregations)
approved_agg.columns = pd.Index(['APR_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist() ])
prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')
refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
refused_agg = refused.groupby('SK_ID_CURR' ).agg(num_aggregations)
refused_agg.columns = pd.Index(['REF_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist() ])
prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')
del refused, refused_agg, approved, approved_agg, prev
gc.collect()
return prev_agg | Home Credit Default Risk |
1,086,270 | from sklearn.metrics import mean_squared_error<compute_train_metric> | def pos_cash(num_rows = None, nan_as_category = True):
pos = pd.read_csv('../input/POS_CASH_balance.csv', nrows=num_rows)
pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)
aggregations = {
'MONTHS_BALANCE': ['max', 'mean', 'size'],
'SK_DPD': ['max', 'mean'],
'SK_DPD_DEF': ['max', 'mean']
}
for cat in cat_cols:
aggregations[cat] = ['mean']
pos_agg = pos.groupby('SK_ID_CURR' ).agg(aggregations)
pos_agg.columns = pd.Index(['POS_' + e[0] + "_" + e[1].upper() for e in pos_agg.columns.tolist() ])
pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR' ).size()
del pos
gc.collect()
return pos_agg | Home Credit Default Risk |
1,086,270 | mean_squared_error(rf.predict(valid[feats]), valid['nota_mat'])**(1/2 )<create_dataframe> | def bureau_and_balance(num_rows = None, nan_as_category = True):
bureau = pd.read_csv('../input/bureau.csv', nrows=num_rows)
bb = pd.read_csv('../input/bureau_balance.csv', nrows=num_rows)
bb, bb_cat = one_hot_encoder(bb, nan_as_category)
bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
for col in bb_cat:
bb_aggregations[col] = ['mean']
bb_agg = bb.groupby('SK_ID_BUREAU' ).agg(bb_aggregations)
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist() ])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(columns= 'SK_ID_BUREAU', inplace= True)
del bb, bb_agg
gc.collect()
num_aggregations = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'CREDIT_DAY_OVERDUE': ['max', 'mean'],
'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'DAYS_CREDIT_UPDATE': ['min', 'max', 'mean'],
'AMT_ANNUITY': ['max', 'mean'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum']
}
cat_aggregations = {}
for cat in bureau_cat: cat_aggregations[cat] = ['mean']
for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist() ])
active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR' ).agg(num_aggregations)
active_agg.columns = pd.Index(['ACT_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
del active, active_agg
gc.collect()
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR' ).agg(num_aggregations)
closed_agg.columns = pd.Index(['CLS_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
del closed, closed_agg, bureau
gc.collect()
return bureau_agg | Home Credit Default Risk |
1,086,270 | final=pd.DataFrame([np.exp(test.codigo_mun ).values,np.exp(rf.predict(test[feats])) ] ).T<rename_columns> | def installments_payments(num_rows = None, nan_as_category = True):
ins = pd.read_csv('../input/installments_payments.csv', nrows=num_rows)
ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)
ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)
ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)
aggregations = {
'NUM_INSTALMENT_VERSION': ['nunique'],
'DPD': ['max', 'mean', 'sum'],
'DBD': ['max', 'mean', 'sum'],
'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],
'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],
'AMT_INSTALMENT': ['max', 'mean', 'sum'],
'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']
}
for cat in cat_cols:
aggregations[cat] = ['mean']
ins_agg = ins.groupby('SK_ID_CURR' ).agg(aggregations)
ins_agg.columns = pd.Index(['INS_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist() ])
ins_agg['INS_COUNT'] = ins.groupby('SK_ID_CURR' ).size()
del ins
gc.collect()
return ins_agg | Home Credit Default Risk |
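The two per-element `apply` calls that floor `DPD` and `DBD` at zero can be written with the vectorised `Series.clip`, which is equivalent for numeric input and considerably faster on large frames (a sketch, not the kernel's code):

```python
# Vectorised equivalent of the apply(lambda x: x if x > 0 else 0) calls above.
ins['DPD'] = (ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']).clip(lower=0)
ins['DBD'] = (ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']).clip(lower=0)
```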
1,086,270 | final.columns=['codigo_mun','nota_mat']<load_from_csv> | def credit_card_balance(num_rows = None, nan_as_category = True):
cc = pd.read_csv('../input/credit_card_balance.csv', nrows=num_rows)
cc, cat_cols = one_hot_encoder(cc, nan_as_category= True)
cc.drop(columns = ['SK_ID_PREV'], inplace = True)
cc_agg = cc.groupby('SK_ID_CURR' ).agg(['min', 'max', 'mean', 'sum', 'var'])
cc_agg.columns = pd.Index(['CC_' + e[0] + "_" + e[1].upper() for e in cc_agg.columns.tolist() ])
cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR' ).size()
del cc
gc.collect()
return cc_agg | Home Credit Default Risk |
1,086,270 | test2=pd.read_csv('.. /input/test.csv' )<define_variables> | def kfold_lightgbm(df, num_folds, stratified = False):
train_df = df[df['TARGET'].notnull() ]
test_df = df[df['TARGET'].isnull() ]
print("Starting LightGBM.Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
del df
gc.collect()
if stratified:
folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001)
else:
folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001)
oof_preds = np.zeros(train_df.shape[0])
sub_preds = np.zeros(test_df.shape[0])
feature_importance_df = pd.DataFrame()
feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV']]
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):
train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
clf = LGBMClassifier(
nthread=4,
n_estimators=10000,
learning_rate=0.01,
num_leaves=40,
colsample_bytree=0.9497036,
subsample=0.8715623,
max_depth=8,
reg_alpha=0.041545473,
reg_lambda=0.0735294,
min_split_gain=0.0222415,
min_child_weight=39.3259775,
silent=-1,
verbose=-1,)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)],
eval_metric= 'auc', verbose= 100, early_stopping_rounds= 100)
oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]
sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
print('Fold %2d AUC : %.6f' %(n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
del clf, train_x, train_y, valid_x, valid_y
gc.collect()
print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))
test_df['TARGET'] = sub_preds
test_df[['SK_ID_CURR', 'TARGET']].to_csv(submission_file_name, index= False)
display_importances(feature_importance_df)
return feature_importance_df | Home Credit Default Risk |
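`kfold_lightgbm` ends by calling `display_importances`, which is not defined anywhere in this excerpt. A plausible sketch of such a plotting helper, following the pattern common to this family of kernels (the styling choices are assumptions):

```python
import matplotlib.pyplot as plt
import seaborn as sns

def display_importances(feature_importance_df):
    # Rank features by mean importance across folds and plot the top 40.
    cols = (feature_importance_df.groupby('feature')['importance']
            .mean().sort_values(ascending=False)[:40].index)
    best = feature_importance_df[feature_importance_df['feature'].isin(cols)]
    plt.figure(figsize=(8, 10))
    sns.barplot(x='importance', y='feature',
                data=best.sort_values(by='importance', ascending=False))
    plt.title('LightGBM feature importance (mean over folds)')
    plt.tight_layout()
    plt.savefig('lgbm_importances.png')
```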
1,086,270 | <define_variables><EOS> | def main(debug = False):
num_rows = 10000 if debug else None
df = application_train_test(num_rows)
with timer("Process bureau and bureau_balance"):
bureau = bureau_and_balance(num_rows)
print("Bureau df shape:", bureau.shape)
df = df.join(bureau, how='left', on='SK_ID_CURR')
del bureau
gc.collect()
with timer("Process previous_applications"):
prev = previous_applications(num_rows)
print("Previous applications df shape:", prev.shape)
df = df.join(prev, how='left', on='SK_ID_CURR')
del prev
gc.collect()
with timer("Process POS-CASH balance"):
pos = pos_cash(num_rows)
print("Pos-cash balance df shape:", pos.shape)
df = df.join(pos, how='left', on='SK_ID_CURR')
del pos
gc.collect()
with timer("Process installments payments"):
ins = installments_payments(num_rows)
print("Installments payments df shape:", ins.shape)
df = df.join(ins, how='left', on='SK_ID_CURR')
del ins
gc.collect()
with timer("Process credit card balance"):
cc = credit_card_balance(num_rows)
print("Credit card balance df shape:", cc.shape)
df = df.join(cc, how='left', on='SK_ID_CURR')
del cc
gc.collect()
with timer("Run LightGBM with kfold"):
feat_importance = kfold_lightgbm(df, num_folds= 5, stratified = False)
if __name__ == "__main__":
submission_file_name = "submission_kernel26.csv"
with timer("Full model run"):
main(debug= False ) | Home Credit Default Risk |
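`main` wraps each stage in a `timer` context manager that the excerpt never defines. A minimal sketch of what it presumably looks like (the conventional implementation in kernels of this lineage, shown here as an assumption):

```python
import time
from contextlib import contextmanager

@contextmanager
def timer(title):
    # Print how long the wrapped block took.
    t0 = time.time()
    yield
    print("{} - done in {:.0f}s".format(title, time.time() - t0))
```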
1,085,108 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<save_to_csv> | warnings.simplefilter(action='ignore', category=FutureWarning ) | Home Credit Default Risk |
1,085,108 | final.to_csv('Breno.csv',index=False )<set_options> | def one_hot_encoder(df, nan_as_category = True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns= categorical_columns, dummy_na= nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns | Home Credit Default Risk |
1,085,108 | sns.set(style='white')
np.seterr(all='ignore')
np.random.seed(100)
LEVEL = 'level_4a'<compute_test_metric> | def application_train_test(num_rows = None, nan_as_category = True):
df = pd.read_csv('../input/application_train.csv', nrows=num_rows)
test_df = pd.read_csv('../input/application_test.csv', nrows=num_rows)
print("Train samples: {}, test samples: {}".format(len(df), len(test_df)))
df = df.append(test_df ).reset_index()
for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
df[bin_feature], uniques = pd.factorize(df[bin_feature])
df, cat_cols = one_hot_encoder(df, nan_as_category)
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)
df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']
df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
del test_df
gc.collect()
return df | Home Credit Default Risk |
1,085,108 | class SigmoidNeuron:
def __init__(self):
self.w = None
self.b = None
def perceptron(self, x):
return np.dot(x, self.w.T)+ self.b
def sigmoid(self, x):
return 1.0/(1.0 + np.exp(-x))
def grad_w_mse(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
return(y_pred - y)* y_pred *(1 - y_pred)* x
def grad_b_mse(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
return(y_pred - y)* y_pred *(1 - y_pred)
def grad_w_ce(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
if y == 0:
return y_pred * x
elif y == 1:
return -1 *(1 - y_pred)* x
else:
raise ValueError("y should be 0 or 1")
def grad_b_ce(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
if y == 0:
return y_pred
elif y == 1:
return -1 *(1 - y_pred)
else:
raise ValueError("y should be 0 or 1")
def fit(self, X, Y, epochs=1, learning_rate=1, initialise=True, loss_fn="mse", display_loss=False):
if initialise:
self.w = np.random.randn(1, X.shape[1])
self.b = 0
if display_loss:
loss = {}
for i in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
dw = 0
db = 0
for x, y in zip(X, Y):
if loss_fn == "mse":
dw += self.grad_w_mse(x, y)
db += self.grad_b_mse(x, y)
elif loss_fn == "ce":
dw += self.grad_w_ce(x, y)
db += self.grad_b_ce(x, y)
self.w -= learning_rate * dw
self.b -= learning_rate * db
if display_loss:
Y_pred = self.sigmoid(self.perceptron(X))
if loss_fn == "mse":
loss[i] = mean_squared_error(Y, Y_pred)
elif loss_fn == "ce":
loss[i] = log_loss(Y, Y_pred)
if display_loss:
plt.plot(loss.values())
plt.xlabel('Epochs')
if loss_fn == "mse":
plt.ylabel('Mean Squared Error')
elif loss_fn == "ce":
plt.ylabel('Log Loss')
plt.show()
def predict(self, X):
Y_pred = []
for x in X:
y_pred = self.sigmoid(self.perceptron(x))
Y_pred.append(y_pred)
return np.array(Y_pred )<load_pretrained> | def bureau_and_balance(num_rows = None, nan_as_category = True):
bureau = pd.read_csv('../input/bureau.csv', nrows=num_rows)
bb = pd.read_csv('../input/bureau_balance.csv', nrows=num_rows)
bb, bb_cat = one_hot_encoder(bb, nan_as_category)
bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
for col in bb_cat:
bb_aggregations[col] = ['mean']
bb_agg = bb.groupby('SK_ID_BUREAU' ).agg(bb_aggregations)
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist() ])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(columns= 'SK_ID_BUREAU', inplace= True)
del bb, bb_agg
gc.collect()
num_aggregations = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'CREDIT_DAY_OVERDUE': ['max', 'mean'],
'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'DAYS_CREDIT_UPDATE': ['min', 'max', 'mean'],
'AMT_ANNUITY': ['max', 'mean'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum']
}
cat_aggregations = {}
for cat in bureau_cat: cat_aggregations[cat] = ['mean']
for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist() ])
active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR' ).agg(num_aggregations)
active_agg.columns = pd.Index(['ACT_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
del active, active_agg
gc.collect()
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR' ).agg(num_aggregations)
closed_agg.columns = pd.Index(['CLS_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
del closed, closed_agg, bureau
gc.collect()
return bureau_agg
| Home Credit Default Risk |
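The `SigmoidNeuron` class in the prompt above implements a single sigmoid unit trained by batch gradient descent under either MSE or cross-entropy loss. A minimal usage sketch on synthetic data (illustrative only; it also assumes the kernel's `tqdm_notebook` import is available, since `fit` uses it):

```python
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(100, 3)                    # 100 samples, 3 features
Y = (X[:, 0] + X[:, 1] > 0).astype(int)  # synthetic binary labels

sn = SigmoidNeuron()
sn.fit(X, Y, epochs=50, learning_rate=0.05, loss_fn='ce')
preds = (sn.predict(X) >= 0.5).astype(int).ravel()
print('train accuracy:', (preds == Y).mean())
```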
1,085,108 | languages = ['ta', 'hi', 'en']
images_train = read_all(".. /input/level_4a_train/"+LEVEL+"/"+"background", key_prefix='bgr_')
for language in languages:
images_train.update(read_all(".. /input/level_4a_train/"+LEVEL+"/"+language, key_prefix=language+"_"))
print(len(images_train))
images_test = read_all(".. /input/level_4a_test/kaggle_"+LEVEL, key_prefix='')
print(len(images_test))
<normalization> | def previous_applications(num_rows = None, nan_as_category = True):
prev = pd.read_csv('../input/previous_application.csv', nrows=num_rows)
prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
num_aggregations = {
'AMT_ANNUITY': ['min', 'max', 'mean'],
'AMT_APPLICATION': ['min', 'max', 'mean'],
'AMT_CREDIT': ['min', 'max', 'mean'],
'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],
'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],
'AMT_GOODS_PRICE': ['min', 'max', 'mean'],
'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
}
cat_aggregations = {}
for cat in cat_cols:
cat_aggregations[cat] = ['mean']
prev_agg = prev.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist() ])
approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
approved_agg = approved.groupby('SK_ID_CURR' ).agg(num_aggregations)
approved_agg.columns = pd.Index(['APR_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist() ])
prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')
refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
refused_agg = refused.groupby('SK_ID_CURR' ).agg(num_aggregations)
refused_agg.columns = pd.Index(['REF_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist() ])
prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')
del refused, refused_agg, approved, approved_agg, prev
gc.collect()
return prev_agg | Home Credit Default Risk |
1,085,108 | scaler = StandardScaler()
X_scaled_train = scaler.fit_transform(X_train)
X_scaled_test = scaler.transform(X_test )<train_model> | def pos_cash(num_rows = None, nan_as_category = True):
pos = pd.read_csv('../input/POS_CASH_balance.csv', nrows=num_rows)
pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)
aggregations = {
'MONTHS_BALANCE': ['max', 'mean', 'size'],
'SK_DPD': ['max', 'mean'],
'SK_DPD_DEF': ['max', 'mean']
}
for cat in cat_cols:
aggregations[cat] = ['mean']
pos_agg = pos.groupby('SK_ID_CURR' ).agg(aggregations)
pos_agg.columns = pd.Index(['POS_' + e[0] + "_" + e[1].upper() for e in pos_agg.columns.tolist() ])
pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR' ).size()
del pos
gc.collect()
return pos_agg | Home Credit Default Risk |
1,085,108 | sn_mse = SigmoidNeuron()
sn_mse.fit(X_scaled_train, Y_train, epochs=200, learning_rate=0.01, loss_fn="mse", display_loss=True )<train_model> | def bureau_and_balance(num_rows = None, nan_as_category = True):
bureau = pd.read_csv('../input/bureau.csv', nrows=num_rows)
bb = pd.read_csv('../input/bureau_balance.csv', nrows=num_rows)
bb, bb_cat = one_hot_encoder(bb, nan_as_category)
bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
for col in bb_cat:
bb_aggregations[col] = ['mean']
bb_agg = bb.groupby('SK_ID_BUREAU' ).agg(bb_aggregations)
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist() ])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(columns= 'SK_ID_BUREAU', inplace= True)
del bb, bb_agg
gc.collect()
num_aggregations = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'CREDIT_DAY_OVERDUE': ['max', 'mean'],
'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'DAYS_CREDIT_UPDATE': ['min', 'max', 'mean'],
'AMT_ANNUITY': ['max', 'mean'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum']
}
cat_aggregations = {}
for cat in bureau_cat: cat_aggregations[cat] = ['mean']
for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist() ])
active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR' ).agg(num_aggregations)
active_agg.columns = pd.Index(['ACT_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
del active, active_agg
gc.collect()
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR' ).agg(num_aggregations)
closed_agg.columns = pd.Index(['CLS_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
del closed, closed_agg, bureau
gc.collect()
return bureau_agg | Home Credit Default Risk |
1,085,108 | sn_ce = SigmoidNeuron()
sn_ce.fit(X_scaled_train, Y_train, epochs=200, learning_rate=0.01, loss_fn="ce", display_loss=True )<predict_on_test> | def installments_payments(num_rows = None, nan_as_category = True):
ins = pd.read_csv('../input/installments_payments.csv', nrows=num_rows)
ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)
ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)
ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)
aggregations = {
'NUM_INSTALMENT_VERSION': ['nunique'],
'DPD': ['max', 'mean', 'sum'],
'DBD': ['max', 'mean', 'sum'],
'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],
'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],
'AMT_INSTALMENT': ['max', 'mean', 'sum'],
'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']
}
for cat in cat_cols:
aggregations[cat] = ['mean']
ins_agg = ins.groupby('SK_ID_CURR' ).agg(aggregations)
ins_agg.columns = pd.Index(['INS_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist() ])
ins_agg['INS_COUNT'] = ins.groupby('SK_ID_CURR' ).size()
del ins
gc.collect()
return ins_agg | Home Credit Default Risk |
1,085,108 | def print_accuracy(sn):
Y_pred_train = sn.predict(X_scaled_train)
Y_pred_binarised_train =(Y_pred_train >= 0.5 ).astype("int" ).ravel()
accuracy_train = accuracy_score(Y_pred_binarised_train, Y_train)
print("Train Accuracy : ", accuracy_train)
print("-"*50 )<compute_test_metric> | def credit_card_balance(num_rows = None, nan_as_category = True):
cc = pd.read_csv('../input/credit_card_balance.csv', nrows=num_rows)
cc, cat_cols = one_hot_encoder(cc, nan_as_category= True)
cc.drop(columns = ['SK_ID_PREV'], inplace = True)
cc_agg = cc.groupby('SK_ID_CURR' ).agg(['min', 'max', 'mean', 'sum', 'var'])
cc_agg.columns = pd.Index(['CC_' + e[0] + "_" + e[1].upper() for e in cc_agg.columns.tolist() ])
cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR' ).size()
del cc
gc.collect()
return cc_agg | Home Credit Default Risk |
1,085,108 | print_accuracy(sn_ce )<save_to_csv> | def kfold_lightgbm(df, num_folds, stratified = False):
train_df = df[df['TARGET'].notnull() ]
test_df = df[df['TARGET'].isnull() ]
print("Starting LightGBM.Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
del df
gc.collect()
if stratified:
folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001)
else:
folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001)
oof_preds = np.zeros(train_df.shape[0])
sub_preds = np.zeros(test_df.shape[0])
feature_importance_df = pd.DataFrame()
feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV']]
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):
train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
clf = LGBMClassifier(
nthread=4,
n_estimators=10000,
learning_rate=0.02,
num_leaves=34,
colsample_bytree=0.9497036,
subsample=0.8715623,
max_depth=8,
reg_alpha=0.041545473,
reg_lambda=0.0735294,
min_split_gain=0.0222415,
min_child_weight=39.3259775,
silent=-1,
verbose=-1,)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)],
eval_metric= 'auc', verbose= 100, early_stopping_rounds= 100)
oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]
sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
print('Fold %2d AUC : %.6f' %(n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
del clf, train_x, train_y, valid_x, valid_y
gc.collect()
print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))
test_df['TARGET'] = sub_preds
test_df[['SK_ID_CURR', 'TARGET']].to_csv(submission_file_name, index= False)
display_importances(feature_importance_df)
return feature_importance_df | Home Credit Default Risk |
1,085,108 | <import_modules><EOS> | def main(debug = False):
num_rows = 10000 if debug else None
df = application_train_test(num_rows)
with timer("Process bureau and bureau_balance"):
bureau = bureau_and_balance(num_rows)
print("Bureau df shape:", bureau.shape)
df = df.join(bureau, how='left', on='SK_ID_CURR')
del bureau
gc.collect()
with timer("Process previous_applications"):
prev = previous_applications(num_rows)
print("Previous applications df shape:", prev.shape)
df = df.join(prev, how='left', on='SK_ID_CURR')
del prev
gc.collect()
with timer("Process POS-CASH balance"):
pos = pos_cash(num_rows)
print("Pos-cash balance df shape:", pos.shape)
df = df.join(pos, how='left', on='SK_ID_CURR')
del pos
gc.collect()
with timer("Process installments payments"):
ins = installments_payments(num_rows)
print("Installments payments df shape:", ins.shape)
df = df.join(ins, how='left', on='SK_ID_CURR')
del ins
gc.collect()
with timer("Process credit card balance"):
cc = credit_card_balance(num_rows)
print("Credit card balance df shape:", cc.shape)
df = df.join(cc, how='left', on='SK_ID_CURR')
del cc
gc.collect()
with timer("Run LightGBM with kfold"):
feat_importance = kfold_lightgbm(df, num_folds= 5, stratified = True)
if __name__ == "__main__":
submission_file_name = "submission_kernel26.csv"
with timer("Full model run"):
main(debug= False ) | Home Credit Default Risk |
1,085,720 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<load_from_csv> | warnings.simplefilter(action='ignore', category=FutureWarning ) | Home Credit Default Risk |
df = pd.read_csv('../input/sputnik/train.csv')
df['Datetime'] = pd.to_datetime(df.epoch,format='%Y-%m-%d %H:%M:%S')
df.index = df.Datetime
df = df.drop(['epoch', 'Datetime'], axis=1)
df.head()<feature_engineering> | def one_hot_encoder(df, nan_as_category = True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns= categorical_columns, dummy_na= nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns | Home Credit Default Risk |
1,085,720 | for sat_id in np.unique(df['sat_id'].values):
print(sat_id, end = ' ')
frame = df[df['sat_id'] == sat_id]
for v in ['x', 'y', 'z']:
e = frame[v].values
t = frame['type'].values
for i in range(len(frame[v])):
if t[i] == 'test':
e[i] = e[i - 24] + (e[i - 24] - e[i - 48])
df[df['sat_id'] == sat_id] = frame<feature_engineering> | def application_train_test(num_rows = None, nan_as_category = True):
df = pd.read_csv('../input/application_train.csv', nrows=num_rows)
test_df = pd.read_csv('../input/application_test.csv', nrows=num_rows)
print("Train samples: {}, test samples: {}".format(len(df), len(test_df)))
df = df.append(test_df ).reset_index()
for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
df[bin_feature], uniques = pd.factorize(df[bin_feature])
df, cat_cols = one_hot_encoder(df, nan_as_category)
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)
df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']
df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
del test_df
gc.collect()
return df | Home Credit Default Risk |
1,085,720 | df['error'] = np.linalg.norm(df[['x', 'y', 'z']].values - df[['x_sim', 'y_sim', 'z_sim']].values, axis=1 )<save_to_csv> | def bureau_and_balance(num_rows = None, nan_as_category = True):
bureau = pd.read_csv('../input/bureau.csv', nrows=num_rows)
bb = pd.read_csv('../input/bureau_balance.csv', nrows=num_rows)
bb, bb_cat = one_hot_encoder(bb, nan_as_category)
bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
for col in bb_cat:
bb_aggregations[col] = ['mean']
bb_agg = bb.groupby('SK_ID_BUREAU' ).agg(bb_aggregations)
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist() ])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(columns= 'SK_ID_BUREAU', inplace= True)
del bb, bb_agg
gc.collect()
num_aggregations = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'CREDIT_DAY_OVERDUE': ['max', 'mean'],
'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'DAYS_CREDIT_UPDATE': ['min', 'max', 'mean'],
'AMT_ANNUITY': ['max', 'mean'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum']
}
cat_aggregations = {}
for cat in bureau_cat: cat_aggregations[cat] = ['mean']
for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist() ])
active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR' ).agg(num_aggregations)
active_agg.columns = pd.Index(['ACT_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
del active, active_agg
gc.collect()
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR' ).agg(num_aggregations)
closed_agg.columns = pd.Index(['CLS_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
del closed, closed_agg, bureau
gc.collect()
return bureau_agg
| Home Credit Default Risk |
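The `np.linalg.norm(..., axis=1)` call in the prompt above computes a per-row Euclidean distance between the predicted and simulated satellite coordinates. A tiny worked check of the idiom:

```python
import numpy as np

a = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 2.0]])
b = np.array([[3.0, 4.0, 0.0], [1.0, 2.0, 2.0]])
print(np.linalg.norm(a - b, axis=1))  # [5. 0.]
```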
1,085,720 | ans = df[df['type'] == 'test'][['id', 'error']]
ans
ans.to_csv('ans.csv', index=False )<import_modules> | def previous_applications(num_rows = None, nan_as_category = True):
prev = pd.read_csv('../input/previous_application.csv', nrows=num_rows)
prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
num_aggregations = {
'AMT_ANNUITY': ['min', 'max', 'mean'],
'AMT_APPLICATION': ['min', 'max', 'mean'],
'AMT_CREDIT': ['min', 'max', 'mean'],
'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],
'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],
'AMT_GOODS_PRICE': ['min', 'max', 'mean'],
'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
}
cat_aggregations = {}
for cat in cat_cols:
cat_aggregations[cat] = ['mean']
prev_agg = prev.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist() ])
approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
approved_agg = approved.groupby('SK_ID_CURR' ).agg(num_aggregations)
approved_agg.columns = pd.Index(['APR_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist() ])
prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')
refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
refused_agg = refused.groupby('SK_ID_CURR' ).agg(num_aggregations)
refused_agg.columns = pd.Index(['REF_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist() ])
prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')
del refused, refused_agg, approved, approved_agg, prev
gc.collect()
return prev_agg | Home Credit Default Risk |
1,085,720 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns<set_options> | def pos_cash(num_rows = None, nan_as_category = True):
pos = pd.read_csv('../input/POS_CASH_balance.csv', nrows=num_rows)
pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)
aggregations = {
'MONTHS_BALANCE': ['max', 'mean', 'size'],
'SK_DPD': ['max', 'mean'],
'SK_DPD_DEF': ['max', 'mean']
}
for cat in cat_cols:
aggregations[cat] = ['mean']
pos_agg = pos.groupby('SK_ID_CURR' ).agg(aggregations)
pos_agg.columns = pd.Index(['POS_' + e[0] + "_" + e[1].upper() for e in pos_agg.columns.tolist() ])
pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR' ).size()
del pos
gc.collect()
return pos_agg | Home Credit Default Risk |
1,085,720 | %matplotlib inline<import_modules> | def bureau_and_balance(num_rows = None, nan_as_category = True):
bureau = pd.read_csv('../input/bureau.csv', nrows=num_rows)
bb = pd.read_csv('../input/bureau_balance.csv', nrows=num_rows)
bb, bb_cat = one_hot_encoder(bb, nan_as_category)
bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
for col in bb_cat:
bb_aggregations[col] = ['mean']
bb_agg = bb.groupby('SK_ID_BUREAU' ).agg(bb_aggregations)
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist() ])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(columns= 'SK_ID_BUREAU', inplace= True)
del bb, bb_agg
gc.collect()
num_aggregations = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'CREDIT_DAY_OVERDUE': ['max', 'mean'],
'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'DAYS_CREDIT_UPDATE': ['min', 'max', 'mean'],
'AMT_ANNUITY': ['max', 'mean'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum']
}
cat_aggregations = {}
for cat in bureau_cat: cat_aggregations[cat] = ['mean']
for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist() ])
active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR' ).agg(num_aggregations)
active_agg.columns = pd.Index(['ACT_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
del active, active_agg
gc.collect()
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR' ).agg(num_aggregations)
closed_agg.columns = pd.Index(['CLS_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
del closed, closed_agg, bureau
gc.collect()
return bureau_agg | Home Credit Default Risk |
1,085,720 | from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.tokenize import TreebankWordTokenizer
import re
import string<import_modules> | def installments_payments(num_rows = None, nan_as_category = True):
ins = pd.read_csv('../input/installments_payments.csv', nrows=num_rows)
ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)
ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)
ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)
aggregations = {
'NUM_INSTALMENT_VERSION': ['nunique'],
'DPD': ['max', 'mean', 'sum'],
'DBD': ['max', 'mean', 'sum'],
'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],
'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],
'AMT_INSTALMENT': ['max', 'mean', 'sum'],
'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']
}
for cat in cat_cols:
aggregations[cat] = ['mean']
ins_agg = ins.groupby('SK_ID_CURR' ).agg(aggregations)
ins_agg.columns = pd.Index(['INS_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist() ])
ins_agg['INS_COUNT'] = ins.groupby('SK_ID_CURR' ).size()
del ins
gc.collect()
return ins_agg | Home Credit Default Risk |
1,085,720 | from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn import metrics<import_modules> | def credit_card_balance(num_rows = None, nan_as_category = True):
cc = pd.read_csv('../input/credit_card_balance.csv', nrows=num_rows)
cc, cat_cols = one_hot_encoder(cc, nan_as_category= True)
cc.drop(columns = ['SK_ID_PREV'], inplace = True)
cc_agg = cc.groupby('SK_ID_CURR' ).agg(['min', 'max', 'mean', 'sum', 'var'])
cc_agg.columns = pd.Index(['CC_' + e[0] + "_" + e[1].upper() for e in cc_agg.columns.tolist() ])
cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR' ).size()
del cc
gc.collect()
return cc_agg | Home Credit Default Risk |
1,085,720 | from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import ComplementNB<import_modules> | def kfold_lightgbm(df, num_folds, stratified = False):
train_df = df[df['TARGET'].notnull() ]
test_df = df[df['TARGET'].isnull() ]
print("Starting LightGBM.Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
del df
gc.collect()
if stratified:
folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001)
else:
folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001)
oof_preds = np.zeros(train_df.shape[0])
sub_preds = np.zeros(test_df.shape[0])
feature_importance_df = pd.DataFrame()
feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV']]
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):
train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
clf = LGBMClassifier(
nthread=4,
n_estimators=10000,
learning_rate=0.02,
num_leaves=34,
colsample_bytree=0.9497036,
subsample=0.8715623,
max_depth=8,
reg_alpha=0.041545473,
reg_lambda=0.0735294,
min_split_gain=0.0222415,
min_child_weight=39.3259775,
silent=-1,
verbose=-1,)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)],
eval_metric= 'auc', verbose= 100, early_stopping_rounds= 100)
oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]
sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
print('Fold %2d AUC : %.6f' %(n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
del clf, train_x, train_y, valid_x, valid_y
gc.collect()
print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))
test_df['TARGET'] = sub_preds
test_df[['SK_ID_CURR', 'TARGET']].to_csv(submission_file_name, index= False)
display_importances(feature_importance_df)
return feature_importance_df | Home Credit Default Risk |
1,085,720 | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer<set_options> | def main(debug = False):
num_rows = 10000 if debug else None
df = application_train_test(num_rows)
with timer("Process bureau and bureau_balance"):
bureau = bureau_and_balance(num_rows)
print("Bureau df shape:", bureau.shape)
df = df.join(bureau, how='left', on='SK_ID_CURR')
del bureau
gc.collect()
with timer("Process previous_applications"):
prev = previous_applications(num_rows)
print("Previous applications df shape:", prev.shape)
df = df.join(prev, how='left', on='SK_ID_CURR')
del prev
gc.collect()
with timer("Process POS-CASH balance"):
pos = pos_cash(num_rows)
print("Pos-cash balance df shape:", pos.shape)
df = df.join(pos, how='left', on='SK_ID_CURR')
del pos
gc.collect()
with timer("Process installments payments"):
ins = installments_payments(num_rows)
print("Installments payments df shape:", ins.shape)
df = df.join(ins, how='left', on='SK_ID_CURR')
del ins
gc.collect()
with timer("Process credit card balance"):
cc = credit_card_balance(num_rows)
print("Credit card balance df shape:", cc.shape)
df = df.join(cc, how='left', on='SK_ID_CURR')
del cc
gc.collect()
with timer("Run LightGBM with kfold"):
feat_importance = kfold_lightgbm(df, num_folds= 5, stratified = False)
if __name__ == "__main__":
submission_file_name = "submission_kernel26.csv"
with timer("Full model run"):
main(debug= False ) | Home Credit Default Risk |
1,085,463 | pd.options.display.max_columns = 1000
pd.options.display.max_rows = 1000<load_from_csv> | PATH = ".. /input"
list_of_files = os.listdir(PATH)
application_train = pd.read_csv(PATH+"/application_train.csv")
application_test = pd.read_csv(PATH+"/application_test.csv")
bureau = pd.read_csv(PATH+"/bureau.csv")
bureau_balance = pd.read_csv(PATH+"/bureau_balance.csv")
credit_card_balance = pd.read_csv(PATH+"/credit_card_balance.csv")
installments_payments = pd.read_csv(PATH+"/installments_payments.csv")
previous_application = pd.read_csv(PATH+"/previous_application.csv")
POS_CASH_balance = pd.read_csv(PATH+"/POS_CASH_balance.csv" ) | Home Credit Default Risk |
1,085,463 | train = pd.read_csv('../input/train.csv', encoding='utf-8')
test = pd.read_csv('../input/test.csv', encoding='utf-8', index_col='id')
example = pd.read_csv('../input/random_example.csv', encoding='utf-8', index_col='id')<train_model> | total_IDS = np.concatenate((application_test["SK_ID_CURR"].values, application_train["SK_ID_CURR"].values))
print(len(np.unique(np.array(total_IDS)))== len(total_IDS)) | Home Credit Default Risk |
1,085,463 | print('Training data size: ', train.shape[0])
print('Test data size: ', test.shape[0] )<feature_engineering> | POS_CASH_balance_IDS = POS_CASH_balance["SK_ID_CURR"].values
bureau_IDS = bureau["SK_ID_CURR"].values
credit_card_balance_IDS = credit_card_balance["SK_ID_CURR"].values
installments_payments_IDS = installments_payments["SK_ID_CURR"].values
previous_application_IDS = previous_application["SK_ID_CURR"].values
tot = len(total_IDS)
print(tot)
print(len(np.intersect1d(POS_CASH_balance_IDS, total_IDS)) /tot*100,
len(np.intersect1d(bureau_IDS, total_IDS)) /tot*100,
len(np.intersect1d(credit_card_balance_IDS, total_IDS)) /tot*100,
len(np.intersect1d(installments_payments_IDS, total_IDS)) /tot*100,
len(np.intersect1d(previous_application_IDS, total_IDS)) /tot*100 ) | Home Credit Default Risk |
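The repeated `len(np.intersect1d(ids, total_IDS)) / tot * 100` expressions above measure what fraction of the application IDs each auxiliary table covers. A small hypothetical helper for the same computation:

```python
import numpy as np

def coverage_pct(ids, reference_ids):
    """Percentage of reference_ids that also appear in ids."""
    return len(np.intersect1d(ids, reference_ids)) / len(reference_ids) * 100

# e.g. coverage_pct(bureau_IDS, total_IDS)
```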
1,085,463 | train = train.reset_index()
train = train.rename(columns={'index': 'id'})
train['id'] = train['id']+test.shape[0]+1
test['type'] = '????'<concatenate> | prev = previous_application["SK_ID_PREV"].values
POS_CASH_balance_IDS_prev = POS_CASH_balance["SK_ID_PREV"].values
credit_card_balance_IDS_prev = credit_card_balance["SK_ID_PREV"].values
installments_payments_IDS_prev = installments_payments["SK_ID_PREV"].values
prev_num = len(prev)
print(prev_num)
print(len(np.intersect1d(POS_CASH_balance_IDS_prev, prev)) /prev_num*100,
len(np.intersect1d(credit_card_balance_IDS_prev, prev)) /prev_num*100,
len(np.intersect1d(installments_payments_IDS_prev, prev)) /prev_num*100)
| Home Credit Default Risk |
1,085,463 | combined = pd.concat([test.reset_index() , train], sort=False)
combined.set_index('id', inplace=True )<count_missing_values> | bureau_br = np.unique(bureau["SK_ID_BUREAU"].values)
print(len(np.intersect1d(np.unique(bureau_balance["SK_ID_BUREAU"].values), bureau_br)) /len(bureau_br)*100 ) | Home Credit Default Risk |
1,085,463 | combined.isna().sum()<define_variables> | breau_total = np.unique(np.intersect1d(bureau_IDS, total_IDS))
bureau_filtered = bureau.loc[bureau["SK_ID_CURR"].isin(breau_total)]
b = np.intersect1d(np.unique(bureau_filtered["SK_ID_BUREAU"].values), np.unique(bureau_balance["SK_ID_BUREAU"].values))
bureau_filtered = bureau_filtered.loc[bureau_filtered["SK_ID_BUREAU"].isin(b)]
len(bureau_filtered["SK_ID_CURR"].values)
bureau_filtered | Home Credit Default Risk |
1,085,463 | labels = ['mind','energy','nature','tactics']
label_letters = ['E','N','T','J']
alt_label_letters = ['I', 'S', 'F', 'P']<categorify> | print(len(np.unique(bureau_filtered["SK_ID_CURR"].values)) /tot*100 ) | Home Credit Default Risk |
1,085,463 | def convert_type_to_int(df):
for i in range(len(labels)) :
df[labels[i]] = df['type'].apply\
(lambda x: x[i] == label_letters[i] ).astype('int')
return df<create_dataframe> | train = application_train.drop(["TARGET"], axis = 1)
train_target = application_train["TARGET"]
test= application_test.copy()
tr = len(application_train)
print(all(train.columns == test.columns))
| Home Credit Default Risk |
1,085,463 | data = combined<data_type_conversions> | df = pd.concat([train, test])
del train, test, application_train, application_test
gc.collect()
def categorical_features(data):
features = [i for i in list(data.columns)if data[i].dtype == 'object']
return features
categorical = categorical_features(df)
numerical = [i for i in df.columns if i not in categorical]
numerical.remove("SK_ID_CURR")
IDs = df["SK_ID_CURR"]
| Home Credit Default Risk |
1,085,463 | data = convert_type_to_int(data )<string_transform> | for feature in categorical:
df[feature] = df[feature].fillna("unidentified")  # fillna returns a copy; assign it back
print(f'Transforming {feature}...')
encoder = LabelEncoder()
encoder.fit(df[feature].astype(str))
df[feature] = encoder.transform(df[feature].astype(str))
df.head() | Home Credit Default Risk |
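Fitting the LabelEncoder on the concatenated train+test frame guarantees both splits share one code table. The same effect can be obtained with pandas alone; a sketch equivalent in spirit, not taken from the kernel:

for feature in categorical:
    # factorize assigns a stable integer per distinct value;
    # astype(str) turns NaN into the literal 'nan' so missing values get their own code
    df[feature], _ = pd.factorize(df[feature].astype(str))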
1,085,463 | def separate_posts(post):
return ' '.join(post.split('|||'))<categorify> | for feats in df.columns:
df[feats] = df[feats].fillna(-1)
df.head() | Home Credit Default Risk |
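The per-column loop above is equivalent to a single frame-level call; a one-line alternative:

# Same effect as the loop above: flag every remaining missing value with -1
df = df.fillna(-1)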
1,085,463 | def remove_urls(post):
pattern_url = r'http[s]?://(?:[A-Za-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9A-Fa-f][0-9A-Fa-f])) +'
subs_url = r' '
return re.sub(pattern_url, subs_url, post )<define_variables> | POS_CASH_balance_G1 = POS_CASH_balance.loc[POS_CASH_balance["SK_ID_CURR"].isin(total_IDS)]
print(len(np.unique(POS_CASH_balance_G1["SK_ID_CURR"].values)))
POS_CASH_balance_G1.head() | Home Credit Default Risk |
1,085,463 | def remove_numbers(post):
p_numbers = '0123456789'
return ''.join([l for l in post if l not in p_numbers] )<string_transform> | np.unique(POS_CASH_balance_G1["NAME_CONTRACT_STATUS"].values)
POS_CASH_balance_G1_num =(POS_CASH_balance_G1.groupby("SK_ID_CURR", as_index=False ).mean())
nb = POS_CASH_balance_G1[["SK_ID_CURR", "NAME_CONTRACT_STATUS"]].groupby("SK_ID_CURR", as_index = False ).count()
nb["num_in_POS_CASH"] = nb["NAME_CONTRACT_STATUS"]
df = df.merge(POS_CASH_balance_G1_num.drop("SK_ID_PREV", axis = 1), on='SK_ID_CURR', how='left' ).fillna(-1)
df = df.merge(nb.drop("NAME_CONTRACT_STATUS", axis = 1), on='SK_ID_CURR', how='left' ).fillna(-1)
del nb, POS_CASH_balance_G1_num, POS_CASH_balance_G1
gc.collect()
df.head()
| Home Credit Default Risk |
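The POS_CASH block above (per-client mean, row count as a volume feature, left-merge, fill missing with -1) is repeated almost verbatim for bureau, credit card, installments and previous applications below. A hedged sketch of a reusable helper capturing the pattern; the function and argument names are my own, not from the kernel:

def aggregate_and_merge(df, table, count_col, count_name):
    # Per-client mean of the numeric columns in the auxiliary table
    means = table.groupby("SK_ID_CURR", as_index=False).mean(numeric_only=True)
    # Number of rows per client, as a simple volume feature
    counts = (table.groupby("SK_ID_CURR", as_index=False)[count_col]
                   .count()
                   .rename(columns={count_col: count_name}))
    df = df.merge(means, on="SK_ID_CURR", how="left").fillna(-1)
    return df.merge(counts, on="SK_ID_CURR", how="left").fillna(-1)

With it, each merge block reduces to a call like df = aggregate_and_merge(df, POS_CASH_balance_G1.drop("SK_ID_PREV", axis=1), "NAME_CONTRACT_STATUS", "num_in_POS_CASH").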
1,085,463 | def remove_punctuation(post):
return ''.join([l for l in post if l not in string.punctuation] )<define_variables> | bureau_G1 = bureau.drop(["SK_ID_BUREAU"], axis = 1 ).loc[bureau["SK_ID_CURR"].isin(total_IDS)]
print(len(np.unique(bureau_G1["SK_ID_CURR"].values)))
bureau_G1.head() | Home Credit Default Risk |
1,085,463 | def remove_strange(post):
return post.replace("‘", '' ).replace("’", '' ).replace("'", '' )<string_transform> | bureau_G1_num =(bureau_G1.groupby("SK_ID_CURR", as_index=False ).mean())
nb = bureau_G1[["SK_ID_CURR", "CREDIT_ACTIVE"]].groupby("SK_ID_CURR", as_index = False ).count()
nb["num_in_bureau"] = nb["CREDIT_ACTIVE"]
df = df.merge(bureau_G1_num, on='SK_ID_CURR', how='left' ).fillna(-1)
df = df.merge(nb.drop("CREDIT_ACTIVE", axis=1), on='SK_ID_CURR', how='left' ).fillna(-1)
del nb, bureau_G1_num, bureau_G1
gc.collect()
df.head() | Home Credit Default Risk |
1,085,463 | def lower_case(post):
return post.lower()<drop_column> | credit_card_balance_G1 = credit_card_balance.drop(["SK_ID_PREV"], axis = 1 ).loc[credit_card_balance["SK_ID_CURR"].isin(total_IDS)]
print(len(np.unique(credit_card_balance_G1["SK_ID_CURR"].values)))
credit_card_balance_G1.head() | Home Credit Default Risk |
1,085,463 | def remove_extra_spaces(post):
return re.sub('\s+', ' ', post )<string_transform> | credit_card_balance_G1_num =(credit_card_balance_G1.groupby("SK_ID_CURR", as_index=False ).mean())
nb = credit_card_balance_G1[["SK_ID_CURR", "NAME_CONTRACT_STATUS"]].groupby("SK_ID_CURR", as_index = False ).count()
nb["num_in_credit_card"] = nb["NAME_CONTRACT_STATUS"]
df = df.merge(credit_card_balance_G1_num, on='SK_ID_CURR', how='left' ).fillna(-1)
df = df.merge(nb.drop("NAME_CONTRACT_STATUS", axis=1), on='SK_ID_CURR', how='left' ).fillna(-1)
del nb, credit_card_balance_G1_num, credit_card_balance_G1
gc.collect()
df.head() | Home Credit Default Risk |
1,085,463 | def tokenizer_func(post):
return TreebankWordTokenizer().tokenize(post )<define_variables> | installments_payments_G1 = installments_payments.drop(["SK_ID_PREV"], axis = 1 ).loc[installments_payments["SK_ID_CURR"].isin(total_IDS)]
print(len(np.unique(installments_payments_G1["SK_ID_CURR"].values)))
installments_payments_G1.head() | Home Credit Default Risk |
1,085,463 | custom_stop_words = [remove_punctuation(word)for word in stopwords.words('english')[39:]]<drop_column> | installments_payments_G1_num =(installments_payments_G1.groupby("SK_ID_CURR", as_index=False ).mean())
nb = installments_payments_G1[["SK_ID_CURR", "NUM_INSTALMENT_VERSION"]].groupby("SK_ID_CURR", as_index = False ).count()
nb["num_in_install_pay"] = nb["NUM_INSTALMENT_VERSION"]
df = df.merge(installments_payments_G1_num, on='SK_ID_CURR', how='left' ).fillna(-1)
df = df.merge(nb.drop("NUM_INSTALMENT_VERSION", axis=1), on='SK_ID_CURR', how='left' ).fillna(-1)
del nb, installments_payments_G1_num, installments_payments_G1
gc.collect()
df.head() | Home Credit Default Risk |
1,085,463 | def remove_stop_words(tokens, stop_words=custom_stop_words):
return [token for token in tokens if(token not in stop_words)and(len(token)<15)]<compute_test_metric> | previous_application_G1 = previous_application.drop(["SK_ID_PREV"], axis = 1 ).loc[previous_application["SK_ID_CURR"].isin(total_IDS)]
print(len(np.unique(previous_application_G1["SK_ID_CURR"].values)))
previous_application_G1.head() | Home Credit Default Risk |
1,085,463 | def lem_func(words, lemma = WordNetLemmatizer()):
return [lemma.lemmatize(word)for word in words if word not in custom_stop_words]<string_transform> | previous_application_G1_num =(previous_application_G1.groupby("SK_ID_CURR", as_index=False ).mean())
nb = previous_application_G1[["SK_ID_CURR", "NAME_CONTRACT_TYPE"]].groupby("SK_ID_CURR", as_index = False ).count()
nb["num_in_previous_app"] = nb["NAME_CONTRACT_TYPE"]
df = df.merge(previous_application_G1_num, on='SK_ID_CURR', how='left' ).fillna(-1)
df = df.merge(nb.drop("NAME_CONTRACT_TYPE", axis=1), on='SK_ID_CURR', how='left' ).fillna(-1)
del nb, previous_application_G1_num, previous_application_G1
gc.collect()
df.head() | Home Credit Default Risk |
1,085,463 | def join_tokens(post):
return ' '.join(post )<drop_column> | train_X = df[:tr].drop("SK_ID_CURR", axis = 1)
test_X = df[tr:].drop("SK_ID_CURR", axis = 1)
train_X["TARGET"] = train_target
y = train_target
folds = KFold(n_splits=5, shuffle=True, random_state=42)
oof_preds = np.zeros(train_X.shape[0])
sub_preds = np.zeros(test_X.shape[0])
feats = [f for f in train_X.columns if f not in ['SK_ID_CURR','TARGET']] | Home Credit Default Risk |
1,085,463 | def clean_data(df, col, cleaning_funcs):
df['posts_processed'] = df[col].apply(separate_posts)
for func in cleaning_funcs:
df['posts_processed'] = df['posts_processed'].apply(func)
return df<count_values> | for n_fold,(trn_idx, val_idx)in enumerate(folds.split(train_X)) :
trn_x, trn_y = train_X[feats].iloc[trn_idx], train_X.iloc[trn_idx]['TARGET']
val_x, val_y = train_X[feats].iloc[val_idx], train_X.iloc[val_idx]['TARGET']
clf = LGBMClassifier(
n_estimators=10000,
learning_rate=0.01,
num_leaves=30,
colsample_bytree=.8,
subsample=.9,
max_depth=7,
reg_alpha=.1,
reg_lambda=.1,
min_split_gain=.01,
min_child_weight=100,
silent=-1,
verbose=-1,
)
clf.fit(trn_x, trn_y,
eval_set= [(trn_x, trn_y),(val_x, val_y)],
eval_metric='auc', verbose=100, early_stopping_rounds=100
)
oof_preds[val_idx] = clf.predict_proba(val_x, num_iteration=clf.best_iteration_)[:, 1]
sub_preds += clf.predict_proba(test_X[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
print('Fold %2d AUC : %.6f' %(n_fold + 1, roc_auc_score(val_y, oof_preds[val_idx])))
del trn_x, trn_y, val_x, val_y
gc.collect() | Home Credit Default Risk |
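The loop prints only per-fold AUC; the single most informative number is the AUC of the stacked out-of-fold predictions. A one-line addition after the loop, assuming roc_auc_score is the sklearn function already used inside it:

# Overall out-of-fold AUC across all five folds
print('Full AUC score %.6f' % roc_auc_score(y, oof_preds))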
1,085,463 | for label in labels:
train['naive_'+label] = train[label].value_counts().index[0]<compute_test_metric> | submission = pd.read_csv(".. /input/sample_submission.csv")
submission['TARGET'] = sub_preds
submission.to_csv("baseline2.csv", index=False)
submission.head() | Home Credit Default Risk |
1,012,040 | naive_results = pd.DataFrame(data=[], index = ['Naive_Accuracy'], columns = labels)
for label in labels:
naive_results[label] = metrics.accuracy_score(train[label], train['naive_'+label] )<compute_test_metric> | df_test = pd.read_csv(".. /input/application_test.csv")
df_train = pd.read_csv(".. /input/application_train.csv" ) | Home Credit Default Risk |
1,012,040 | for label in labels:
print('\nConfusion matrix for ' + label + ' is:')
print(metrics.confusion_matrix(train[label], train['naive_'+label]))<drop_column> | def kfold_lightgbm(df, num_folds=5, stratified = False, debug= False):
train_df = df[df['TARGET'].notnull() ]
test_df = df[df['TARGET'].isnull() ]
print("Starting LightGBM.Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
del df
gc.collect()
if stratified:
folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001)
else:
folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001)
oof_preds = np.zeros(train_df.shape[0])
sub_preds = np.zeros(test_df.shape[0])
feature_importance_df = pd.DataFrame()
feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]
for n_fold,(train_idx, valid_idx)in enumerate(folds.split(train_df[feats], train_df['TARGET'])) :
train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
clf = LGBMClassifier(
nthread=4,
n_estimators=1000,
learning_rate=0.02,
num_leaves=34,
colsample_bytree=0.9497036,
subsample=0.8715623,
max_depth=8,
reg_alpha=0.041545473,
reg_lambda=0.0735294,
min_split_gain=0.0222415,
min_child_weight=39.3259775,
silent=-1,
verbose=-1,)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)],
eval_metric= 'auc', verbose= 100, early_stopping_rounds= 200)
oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]
sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
print('Fold %2d AUC : %.6f' %(n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
del clf, train_x, train_y, valid_x, valid_y
gc.collect()
print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))
test_df['TARGET'] = sub_preds
display_importances(feature_importance_df)
return test_df, feature_importance_df
def display_importances(feature_importance_df_):
cols = feature_importance_df_[["feature", "importance"]].groupby("feature" ).mean().sort_values(by="importance", ascending=False)[:40].index
best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
plt.figure(figsize=(8, 10))
sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False))
plt.title('LightGBM Features(avg over folds)')
plt.tight_layout()
plt.savefig('lgbm_importances01.png' ) | Home Credit Default Risk |
1,012,040 | data = clean_data(data, 'posts', cleaning_funcs )<prepare_x_and_y> | def installments_payments(df):
aggs = ['sum', 'mean', 'max', 'min']
feat_cols = ['NUM_INSTALMENT_NUMBER', 'AMT_INSTALMENT', 'AMT_PAYMENT']
agg_dict = {k:aggs for k in feat_cols}
df_agg = df.groupby('SK_ID_CURR' ).agg(agg_dict)
df_agg.columns = [col + '_' + agg.upper() + '_INSTALL' for col in feat_cols for agg in agg_dict[col]]
return df_agg
def pos_cash_balance(df):
df['COMPLETED_CONTRACTS'] = (df['NAME_CONTRACT_STATUS'] == 'Completed').astype(int)  # flag contracts whose status is 'Completed'
aggs = ['sum', 'mean']
feat_cols = ['CNT_INSTALMENT', 'CNT_INSTALMENT_FUTURE', 'SK_DPD', 'COMPLETED_CONTRACTS']
agg_dict = {k:aggs for k in feat_cols}
agg_dict['COMPLETED_CONTRACTS'] = ['mean', 'size']
df_agg = df.groupby('SK_ID_CURR' ).agg(agg_dict)
df_agg.columns = [col + '_' + agg.upper() + '_POS' for col in feat_cols for agg in agg_dict[col]]
return df_agg
def credit_card_balance(df):
df['PCT_CREDIT_LIMIT'] = df['AMT_BALANCE']/df['AMT_CREDIT_LIMIT_ACTUAL']
aggs = ['sum', 'mean', 'max']
feat_cols = ['PCT_CREDIT_LIMIT', 'AMT_CREDIT_LIMIT_ACTUAL', 'AMT_DRAWINGS_ATM_CURRENT',
'AMT_DRAWINGS_CURRENT', 'AMT_DRAWINGS_OTHER_CURRENT', 'AMT_DRAWINGS_POS_CURRENT',
'AMT_INST_MIN_REGULARITY', 'AMT_PAYMENT_TOTAL_CURRENT', 'AMT_RECEIVABLE_PRINCIPAL',
'AMT_RECIVABLE', 'AMT_TOTAL_RECEIVABLE', 'CNT_DRAWINGS_ATM_CURRENT',
'CNT_INSTALMENT_MATURE_CUM', 'SK_DPD']
agg_dict = {k:aggs for k in feat_cols}
agg_dict['PCT_CREDIT_LIMIT'] = ['mean', 'max']
df_agg = df.groupby('SK_ID_CURR' ).agg(agg_dict)
df_agg.columns = [col + '_' + agg.upper() + '_CREDIT' for col in feat_cols for agg in agg_dict[col]]
return df_agg
def bureau(df):
df['CREDIT_ACTIVE_NUM'] = (df.CREDIT_ACTIVE == 'Active').astype(int)  # CREDIT_ACTIVE values are title-case ('Active', 'Closed', ...)
aggs = ['sum', 'mean', 'max']
feat_cols = ['CREDIT_DAY_OVERDUE', 'CNT_CREDIT_PROLONG', 'AMT_CREDIT_MAX_OVERDUE',
'AMT_CREDIT_SUM',
'AMT_CREDIT_SUM_LIMIT', 'AMT_CREDIT_SUM_OVERDUE', 'AMT_ANNUITY',
'CREDIT_ACTIVE_NUM', 'AMT_CREDIT_SUM_DEBT']
agg_dict = {k:aggs for k in feat_cols}
agg_dict['CREDIT_ACTIVE_NUM'] = ['sum', 'mean', 'size']
df_agg = df.groupby('SK_ID_CURR' ).agg(agg_dict)
df_agg.columns = [col + '_' + agg.upper() + '_BUREAU' for col in feat_cols for agg in agg_dict[col]]
return df_agg
def application_train(df):
feat_num_cols = ['CNT_CHILDREN', 'AMT_INCOME_TOTAL', 'AMT_CREDIT', 'AMT_ANNUITY',
'AMT_GOODS_PRICE', 'REGION_POPULATION_RELATIVE', 'DAYS_BIRTH',
'DAYS_EMPLOYED', 'DAYS_REGISTRATION', 'OWN_CAR_AGE', 'CNT_FAM_MEMBERS',
'EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']
feat_binary_cols = ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']
for col in feat_binary_cols:
df[col] = df[col].factorize() [0]
return df[['SK_ID_CURR', 'TARGET'] + feat_num_cols + feat_binary_cols].set_index('SK_ID_CURR')
def previous_application(df):
df['CONTRACTS_REFUSED'] = (df['NAME_CONTRACT_STATUS'] == 'Refused').astype(int)  # status values are title-case ('Approved', 'Refused', ...)
aggs = ['sum', 'mean', 'max']
feat_cols = ['AMT_ANNUITY', 'AMT_APPLICATION', 'AMT_CREDIT', 'AMT_DOWN_PAYMENT',
'AMT_GOODS_PRICE', 'RATE_DOWN_PAYMENT', 'RATE_INTEREST_PRIMARY',
'RATE_INTEREST_PRIVILEGED', 'CNT_PAYMENT', 'CONTRACTS_REFUSED']
agg_dict = {k:aggs for k in feat_cols}
df_agg = df.groupby('SK_ID_CURR' ).agg(agg_dict)
df_agg.columns = [col + '_' + agg.upper() + '_PREV' for col in feat_cols for agg in aggs]
return df_agg
def crossval_predict(df_train, df_test, fit_predictor,
previous_app_features=True,
bureau_features=True,
credit_features=True,
pos_features=True,
install_features=True):
hold = []
hold.append(application_train(pd.concat([df_train, df_test])))
del(df_train, df_test)
gc.collect()
if previous_app_features:
df_prev = pd.read_csv('.. /input/previous_application.csv')
hold.append(previous_application(df_prev))
del(df_prev)
gc.collect()
if bureau_features:
df_bureau = pd.read_csv('.. /input/bureau.csv')
hold.append(bureau(df_bureau))
del(df_bureau)
gc.collect()
if credit_features:
df_credit = pd.read_csv('.. /input/credit_card_balance.csv')
hold.append(credit_card_balance(df_credit))
del(df_credit)
gc.collect()
if pos_features:
df_pos = pd.read_csv('.. /input/POS_CASH_balance.csv')
hold.append(pos_cash_balance(df_pos))
del(df_pos)
gc.collect()
if install_features:
df_install = pd.read_csv('.. /input/installments_payments.csv')
hold.append(installments_payments(df_install))
del(df_install)
gc.collect()
df_test, feat_importance = fit_predictor(pd.concat(hold, axis=1))
del(hold)
gc.collect()
return df_test[['TARGET']].reset_index()
| Home Credit Default Risk |
1,012,040 | <feature_engineering><EOS> | %%time
output = crossval_predict(df_train,
df_test,
kfold_lightgbm)
output.to_csv('extratrees_simple.csv', index=False ) | Home Credit Default Risk |
1,072,962 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<prepare_x_and_y> | gc.enable() | Home Credit Default Risk |
1,072,962 | X1 = X_countV[len(test):]
X2 = X_tfidfV[len(test):]
y = data[len(test):]<compute_test_metric> | buro_bal = pd.read_csv('.. /input/bureau_balance.csv')
print('Buro bal shape : ', buro_bal.shape ) | Home Credit Default Risk |
1,072,962 | logR = LogisticRegression(solver='lbfgs',n_jobs=-1)
evaluate(logR, [X1,X2], y )<compute_test_metric> | print('transform to dummies')
buro_bal = pd.concat([buro_bal, pd.get_dummies(buro_bal.STATUS, prefix='buro_bal_status')], axis=1 ).drop('STATUS', axis=1)
| Home Credit Default Risk |
1,072,962 | logR = LogisticRegression(class_weight='balanced', solver='lbfgs', n_jobs=-1)
evaluate(logR, [X1, X2], y )<compute_train_metric> | print('Counting buros')
buro_counts = buro_bal[['SK_ID_BUREAU', 'MONTHS_BALANCE']].groupby('SK_ID_BUREAU' ).count()
buro_bal['buro_count'] = buro_bal['SK_ID_BUREAU'].map(buro_counts['MONTHS_BALANCE'] ) | Home Credit Default Risk |
1,072,962 | knn = KNeighborsClassifier()
evaluate(knn, [0, X2], y )<feature_engineering> | print('averaging buro bal')
avg_buro_bal = buro_bal.groupby('SK_ID_BUREAU' ).mean()
| Home Credit Default Risk |
1,072,962 | tfidf = TfidfVectorizer(max_features=1000, min_df=2, max_df=0.9)
X_tfidfV = tfidf.fit_transform(data['posts_processed'] )<filter> | avg_buro_bal.columns = ['avg_buro_' + f_ for f_ in avg_buro_bal.columns]
del buro_bal
gc.collect() | Home Credit Default Risk |
1,072,962 | X3 = X_tfidfV[len(test):]<choose_model_class> | print('Read Bureau')
buro = pd.read_csv('.. /input/bureau.csv' ) | Home Credit Default Risk |
1,072,962 | ks = [3, 5, 10]
param_grid = {'n_neighbors': ks}
grid_knn = GridSearchCV(KNeighborsClassifier() , param_grid, scoring='f1', cv=5, return_train_score=False )<train_on_grid> | print('Go to dummies')
buro_credit_active_dum = pd.get_dummies(buro.CREDIT_ACTIVE, prefix='ca_')
buro_credit_currency_dum = pd.get_dummies(buro.CREDIT_CURRENCY, prefix='cu_')
buro_credit_type_dum = pd.get_dummies(buro.CREDIT_TYPE, prefix='ty_')
| Home Credit Default Risk |
1,072,962 | grid_knn.fit(X3, y[labels[0]] )<create_dataframe> | buro_full = pd.concat([buro, buro_credit_active_dum, buro_credit_currency_dum, buro_credit_type_dum], axis=1 ) | Home Credit Default Risk |
1,072,962 | pd.DataFrame(grid_knn.cv_results_)[['params', 'mean_test_score', 'rank_test_score']]<compute_train_metric> | del buro_credit_active_dum, buro_credit_currency_dum, buro_credit_type_dum
gc.collect() | Home Credit Default Risk |
1,072,962 | randF = RandomForestClassifier(n_estimators=10)
evaluate(randF, [0, X3], y )<define_variables> | print('Merge with buro avg')
buro_full = buro_full.merge(right=avg_buro_bal.reset_index() , how='left', on='SK_ID_BUREAU', suffixes=('', '_bur_bal')) | Home Credit Default Risk |
1,072,962 | cleaning_funcs = [remove_urls,
remove_numbers,
remove_extra_spaces]<drop_column> | print('Counting buro per SK_ID_CURR')
nb_bureau_per_curr = buro_full[['SK_ID_CURR', 'SK_ID_BUREAU']].groupby('SK_ID_CURR' ).count()
buro_full['SK_ID_BUREAU'] = buro_full['SK_ID_CURR'].map(nb_bureau_per_curr['SK_ID_BUREAU'] ) | Home Credit Default Risk |
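The count-then-map idiom above overwrites SK_ID_BUREAU with the number of bureau records per client. groupby(...).transform('count') expresses the same thing in one step; a sketch:

# One-step equivalent of the count-then-map above
buro_full['SK_ID_BUREAU'] = buro_full.groupby('SK_ID_CURR')['SK_ID_BUREAU'].transform('count')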
1,072,962 | data = clean_data(data, 'posts', cleaning_funcs )<feature_engineering> | print('Averaging bureau')
avg_buro = buro_full.groupby('SK_ID_CURR' ).mean()
print(avg_buro.head() ) | Home Credit Default Risk |
1,072,962 | tfidf = TfidfVectorizer(stop_words='english',
max_df=0.8,
min_df=2)
X_tfidfV = tfidf.fit_transform(data['posts_processed'] )<prepare_x_and_y> | del buro, buro_full
gc.collect() | Home Credit Default Risk |
1,072,962 | X4 = X_tfidfV[len(test):]
X_sub = X_tfidfV[:len(test)]
y = data[len(test):]<choose_model_class> | print('Read prev')
prev = pd.read_csv('.. /input/previous_application.csv')
| Home Credit Default Risk |
1,072,962 | logR = LogisticRegression(class_weight='balanced', max_iter=1000)
logR_parameters = {'C': [0.01, 0.1, 1.0, 10],
'solver':('liblinear','saga','lbfgs')
}
logR_grid = GridSearchCV(estimator=logR,
param_grid=logR_parameters,
cv=5,
return_train_score=False, )<prepare_output> | prev_cat_features = [
f_ for f_ in prev.columns if prev[f_].dtype == 'object'
]
| Home Credit Default Risk |
1,072,962 | sub = example<predict_on_test> | print('Go to dummies')
prev_dum = pd.DataFrame()
for f_ in prev_cat_features:
prev_dum = pd.concat([prev_dum, pd.get_dummies(prev[f_], prefix=f_ ).astype(np.uint8)], axis=1)
prev = pd.concat([prev, prev_dum], axis=1 ) | Home Credit Default Risk |
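The loop builds dummies one feature at a time and concatenates them onto prev. pd.get_dummies can do this in a single call when given the column list; note it drops the source columns, unlike the loop above, which keeps them. A sketch:

# Single-call alternative; drops the original categorical columns
prev = pd.get_dummies(prev, columns=prev_cat_features, dtype=np.uint8)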
1,072,962 | for label in labels:
y_train = y[label]
X_train = X4
logR_grid.fit(X_train, y_train)
sub[label] = logR_grid.predict(X_sub)
results = pd.DataFrame(logR_grid.cv_results_)
print(results[results['rank_test_score']==1][['params', 'mean_test_score']] )<save_to_csv> | del prev_dum
gc.collect() | Home Credit Default Risk |
1,072,962 | sub.to_csv('submission.csv' )<set_options> | print('Counting number of Prevs')
nb_prev_per_curr = prev[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR' ).count()
prev['SK_ID_PREV'] = prev['SK_ID_CURR'].map(nb_prev_per_curr['SK_ID_PREV'])
print('Averaging prev')
avg_prev = prev.groupby('SK_ID_CURR' ).mean()
print(avg_prev.head())
del prev
gc.collect()
print('Reading POS_CASH')
pos = pd.read_csv('.. /input/POS_CASH_balance.csv')
print('Go to dummies')
pos = pd.concat([pos, pd.get_dummies(pos['NAME_CONTRACT_STATUS'])], axis=1)
print('Compute nb of prevs per curr')
nb_prevs = pos[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR' ).count()
pos['SK_ID_PREV'] = pos['SK_ID_CURR'].map(nb_prevs['SK_ID_PREV'])
print('Go to averages')
avg_pos = pos.groupby('SK_ID_CURR' ).mean()
del pos, nb_prevs
gc.collect()
print('Reading CC balance')
cc_bal = pd.read_csv('.. /input/credit_card_balance.csv')
print('Go to dummies')
cc_bal = pd.concat([cc_bal, pd.get_dummies(cc_bal['NAME_CONTRACT_STATUS'], prefix='cc_bal_status_')], axis=1)
nb_prevs = cc_bal[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR' ).count()
cc_bal['SK_ID_PREV'] = cc_bal['SK_ID_CURR'].map(nb_prevs['SK_ID_PREV'])
print('Compute average')
avg_cc_bal = cc_bal.groupby('SK_ID_CURR' ).mean()
avg_cc_bal.columns = ['cc_bal_' + f_ for f_ in avg_cc_bal.columns]
del cc_bal, nb_prevs
gc.collect()
print('Reading Installments')
inst = pd.read_csv('.. /input/installments_payments.csv')
nb_prevs = inst[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR' ).count()
inst['SK_ID_PREV'] = inst['SK_ID_CURR'].map(nb_prevs['SK_ID_PREV'])
avg_inst = inst.groupby('SK_ID_CURR' ).mean()
avg_inst.columns = ['inst_' + f_ for f_ in avg_inst.columns]
print('Read data and test')
data = pd.read_csv('.. /input/application_train.csv')
test = pd.read_csv('.. /input/application_test.csv')
print('Shapes : ', data.shape, test.shape)
y = data['TARGET']
del data['TARGET']
categorical_feats = [
f for f in data.columns if data[f].dtype == 'object'
]
categorical_feats
for f_ in categorical_feats:
data[f_], indexer = pd.factorize(data[f_])
test[f_] = indexer.get_indexer(test[f_] ) | Home Credit Default Risk |
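pd.factorize on the train column returns both the integer codes and the categories (`indexer`); indexer.get_indexer then encodes test with the same mapping, assigning -1 to any category unseen during training. A tiny demonstration of the behavior; the example values are made up:

codes, indexer = pd.factorize(pd.Series(['cash', 'revolving', 'cash']))
print(codes)                                      # [0 1 0]
print(indexer.get_indexer(['revolving', 'car']))  # [ 1 -1]  ('car' was never seen in train)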