| kernel_id (int64, 24.2k–23.3M) | prompt (string, 8–1.85M chars) | completion (string, 1–182k chars) | comp_name (string, 5–57 chars) |
|---|---|---|---|
1,511,034 | evaluate_model(y_train, lgb_model, X_train)<save_to_csv> | credit['tmp'] = credit['AMT_BALANCE']/credit['AMT_CREDIT_LIMIT_ACTUAL']
tmp = credit.groupby(["SK_ID_CURR","SK_ID_PREV"])['tmp'].max().reset_index()
tmp = tmp.groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\
.reset_index()
tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4']
for df in [train,test]:
tmp_merge = df[['SK_ID_CURR']]
tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
df['min_max_ratio_balance_limit_credit'] = tmp_merge['des1']
df['max_max_ratio_balance_limit_credit'] = tmp_merge['des2']
df['mean_max_ratio_balance_limit_credit'] = tmp_merge['des3']
df['sum_max_ratio_balance_limit_credit'] = tmp_merge['des4']
| Home Credit Default Risk |
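The `agg({"returns": [np.min, np.max, np.mean, np.sum]})` form used in this row (and the next) was deprecated in pandas 0.20 and removed in 1.0, where it raises `SpecificationError`. A minimal sketch of the equivalent on current pandas, using a toy stand-in for the `credit` table (values illustrative):

```python
import pandas as pd

# Toy stand-in for the credit_card_balance-derived frame used above (illustrative values).
credit = pd.DataFrame({
    "SK_ID_CURR": [1, 1, 1, 2],
    "SK_ID_PREV": [10, 10, 11, 20],
    "tmp": [0.2, 0.5, 0.1, 0.9],  # AMT_BALANCE / AMT_CREDIT_LIMIT_ACTUAL
})

# Per-previous-loan max of the ratio, as in the row above.
per_prev = credit.groupby(["SK_ID_CURR", "SK_ID_PREV"])["tmp"].max().reset_index()

# Modern replacement for agg({"returns": [np.min, np.max, np.mean, np.sum]}).
agg = per_prev.groupby("SK_ID_CURR")["tmp"].agg(["min", "max", "mean", "sum"]).reset_index()
agg.columns = ["SK_ID_CURR", "des1", "des2", "des3", "des4"]
print(agg)
```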
1,511,034 | conversion_probs = lgb_model.predict_proba(X_test)[:, 1]
subm = pd.DataFrame({'person': y_test.index, 'label': conversion_probs})
subm.to_csv('submission.csv', index=False)<import_modules> | credit['tmp'] = credit['AMT_BALANCE']/credit['AMT_CREDIT_LIMIT_ACTUAL']
tmp = credit.groupby(["SK_ID_CURR","SK_ID_PREV"])['tmp'].min().reset_index()
tmp = tmp.groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max, np.mean, np.sum]})\
.reset_index()
tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4']
for df in [train,test]:
tmp_merge = df[['SK_ID_CURR']]
tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
df['min_min_ratio_balance_limit_credit'] = tmp_merge['des1']
df['max_min_ratio_balance_limit_credit'] = tmp_merge['des2']
df['mean_min_ratio_balance_limit_credit'] = tmp_merge['des3']
df['sum_min_ratio_balance_limit_credit'] = tmp_merge['des4']
| Home Credit Default Risk |
1,511,034 | print(os.listdir("../input"))
<load_from_csv> | doc = [x for x in train.columns if 'FLAG_DOC' in x]
connection = ['FLAG_MOBIL', 'FLAG_EMP_PHONE', 'FLAG_WORK_PHONE',
'FLAG_CONT_MOBILE', 'FLAG_PHONE', 'FLAG_EMAIL',]
le = LabelEncoder()
categorical = ['CODE_GENDER','FLAG_OWN_CAR','FLAG_OWN_REALTY','NAME_EDUCATION_TYPE',
'NAME_FAMILY_STATUS',
'FLAG_MOBIL','FLAG_EMP_PHONE','FLAG_CONT_MOBILE','FLAG_EMAIL','FLAG_WORK_PHONE',
'OCCUPATION_TYPE','ORGANIZATION_TYPE_v2',
'NAME_INCOME_TYPE','NAME_HOUSING_TYPE','NAME_TYPE_SUITE',
'NAME_CONTRACT_TYPE']
for i in categorical:
train[i.lower()] = le.fit_transform(train[i].fillna("NA"))
test[i.lower()] = le.transform(test[i].fillna("NA"))
for df in [train,test]:
df['ratio_credit_annity'] = df['AMT_CREDIT']/df['AMT_ANNUITY']
df['ratio_credit_goods'] = df['AMT_CREDIT']/df['AMT_GOODS_PRICE']
df['ratio_min_annuity'] = df['AMT_INCOME_TOTAL']/df['min_amt_annuity']
df['ratio_max_annuity'] = df['AMT_INCOME_TOTAL']/df['max_amt_annuity']
df['ratio_mean_annuity'] = df['AMT_INCOME_TOTAL']/df['mean_amt_annuity']
tmp = df[df['NAME_CONTRACT_TYPE'] == "Revolving loans"].index
df.loc[tmp, 'ratio_credit_goods'] = np.nan  # .loc on the saved index; chained .iloc assignment is unreliable
df['ratio_credit_annity'].replace(20, np.nan, inplace = True)
df['doc'] = df[doc].mean(axis=1)
df['count_null_cash_loans'].replace(np.nan, 0, inplace = True)
df['count_null_revolving_loans'].replace(np.nan, 0, inplace = True)
df['ratio_cntpay_cur_mean'] = df['ratio_credit_annity']/df['mean_cntpay']
df['ratio_cntpay_cur_min'] = df['ratio_credit_annity']/df['min_cntpay']
df['ratio_cntpay_cur_max'] = df['ratio_credit_annity']/df['max_cntpay']
df['delta_bureau_HC'] = df['max_day_lastdue'] - df['max_endate_bureau']
df['frequency_bureau'] =(df['max_endate_bureau'] - df['min_endate_bureau'])/(df['count_active_bureau_v2'])
df['frequency_bureau'] = df['frequency_bureau'].replace(0, np.nan)  # assign back: replace() is not in-place here
df['sum_delta_install_credit_curr'] = df['AMT_CREDIT'] + df['sum_delta_paid_install']
df['strenght_income'] = df['sum_delta_install_credit_curr']/df['AMT_INCOME_TOTAL']
df['sum_notfinish'] = df['sum_notfinish_cash_loans'] + df['sum_notfinish_consumer_loans']
df['ratio_income_notfinish'] = df['sum_notfinish']/df['AMT_CREDIT']
df['connection'] = df[connection].mean(axis=1)
df['living'] = df[['REG_REGION_NOT_LIVE_REGION',
'REG_REGION_NOT_WORK_REGION', 'LIVE_REGION_NOT_WORK_REGION',
'REG_CITY_NOT_LIVE_CITY', 'REG_CITY_NOT_WORK_CITY',
'LIVE_CITY_NOT_WORK_CITY']].mean(axis=1)
predictors = ['EXT_SOURCE_1','EXT_SOURCE_2','EXT_SOURCE_3','CNT_CHILDREN',
'AMT_INCOME_TOTAL','AMT_CREDIT','AMT_ANNUITY','AMT_GOODS_PRICE',
'CNT_FAM_MEMBERS', 'DAYS_BIRTH','DAYS_EMPLOYED','DAYS_REGISTRATION', 'DAYS_ID_PUBLISH',
'DAYS_LAST_PHONE_CHANGE',
'OBS_30_CNT_SOCIAL_CIRCLE',
'DEF_30_CNT_SOCIAL_CIRCLE',
'OWN_CAR_AGE','REGION_POPULATION_RELATIVE',
'AMT_REQ_CREDIT_BUREAU_YEAR',
'REGION_RATING_CLIENT','REGION_RATING_CLIENT_W_CITY','TOTALAREA_MODE','APARTMENTS_AVG',
'ratio_credit_annity','ratio_credit_goods','doc','connection',
'min_amt_app','max_amt_app','mean_amt_app',
'min_amt_app_v1','max_amt_app_v1','mean_amt_app_v1',
'min_amt_card','max_amt_card','mean_amt_card',
'min_amt_app_fail','max_amt_app_fail','mean_amt_app_fail',
'min_amt_app_v1_fail','max_amt_app_v1_fail','mean_amt_app_v1_fail',
'min_amt_card_fail','max_amt_card_fail','mean_amt_card_fail',
'min_amt_annuity', 'max_amt_annuity', 'mean_amt_annuity',
'min_cntpay', 'max_cntpay', 'mean_cntpay',
'min_day_decision','max_day_decision',
'min_day_termination','max_day_termination',
'max_day_lastdue',
'min_firstdraw', 'max_firstdraw',
'min_firstdraw_decision', 'max_firstdraw_decision', 'mean_firstdraw_decision',
'min_firstdraw_firstdue', 'max_firstdraw_firstdue', 'mean_firstdraw_firstdue',
'min_firstdraw_lastdue', 'max_firstdraw_lastdue', 'mean_firstdraw_lastdue',
'max_day_decision_fail',
'count_notfinish_revolving_loans','count_notfinish_cash_loans','count_notfinish_consumer_loans',
'min_sooner', 'max_sooner', 'mean_sooner',
'min_seller', 'max_seller', 'mean_seller',
'sum_notfinish',
'min_amt_goods_v1','max_amt_goods_v1','mean_amt_goods_v1',
'1st_recent_app','1st_recent_credit','1st_recent_card',
'1st_recent_app_fail','1st_recent_credit_fail','1st_recent_card_fail',
'1st_recent_ratedown','1st_recent_ratedown_fail',
'1st_recent_cntpay',
'ratio_cntpay_cur_mean',
'count_cash_loans','count_consumer_loans','count_clear_reason',
'count_middle', 'count_low_normal', 'count_high', 'count_low_action',
'ratio_middle', 'ratio_low_normal', 'ratio_high', 'ratio_low_action',
'count_active_bureau','count_closed_bureau',
'count_active_bureau_v2',
'min_active_credit_bureau','max_active_credit_bureau',
'mean_active_credit_bureau',
'min_closed_credit_bureau','max_closed_credit_bureau',
'mean_closed_credit_bureau',
'min_active_credit_bureau_v1','max_active_credit_bureau_v1',
'mean_active_credit_bureau_v1',
'min_active_credit_bureau_v2','max_active_credit_bureau_v2',
'mean_active_credit_bureau_v2',
'min_active_credit_bureau_v3','max_active_credit_bureau_v3',
'mean_active_credit_bureau_v3',
'max_endate_bureau',
'1st_endate_bureau',
'max_endatefact_bureau',
'min_deltaendate_bureau','max_deltaendate_bureau','mean_deltaendate_bureau',
'min_duration_bureau','max_duration_bureau','mean_duration_bureau',
'min_sooner_bureau','max_sooner_bureau','mean_sooner_bureau',
'min_annuity_bureau','max_annuity_bureau','mean_annuity_bureau',
'min_debt_bureau','max_debt_bureau','mean_debt_bureau','sum_debt_bureau',
'min_debt_bureau_v1','max_debt_bureau_v1','mean_debt_bureau_v1',
'min_limit_bureau','max_limit_bureau','mean_limit_bureau',
'min_overdue_bureau','max_overdue_bureau','mean_overdue_bureau',
'min_ratio_debt_credit_bureau','max_ratio_debt_credit_bureau','mean_ratio_debt_credit_bureau',
'min_delta_num_install','max_delta_num_install','mean_delta_num_install',
'min_ratio_num_install','max_ratio_num_install','mean_ratio_num_install',
'min_max_version_install','max_max_version_install','mean_max_version_install',
'min_ratio_paid_install','max_ratio_paid_install','mean_ratio_paid_install',
'sum_delta_paid_install','sum_delta_install_credit_curr',
'min_max_num_install','max_max_num_install','mean_max_num_install',
'count_small_payment','count_late_payment_0','count_late_payment_10','count_late_payment_20','count_late_payment_30',
'min_max_ratio_balance_limit_credit','max_max_ratio_balance_limit_credit','mean_max_ratio_balance_limit_credit',
] +\
[i.lower() for i in categorical]
categorical = [i.lower() for i in categorical] | Home Credit Default Risk |
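One caveat on the encoding loop in this row: `le.transform(test[i])` raises `ValueError` for any category that appears only in the test frame. A minimal sketch of the usual defensive pattern, fitting the encoder on the union of both frames (toy data, illustrative column):

```python
import pandas as pd
from sklearn.preprocessing import LabelEncoder

train = pd.DataFrame({"OCCUPATION_TYPE": ["Laborers", "Managers", None]})
test = pd.DataFrame({"OCCUPATION_TYPE": ["Drivers", "Laborers"]})  # 'Drivers' unseen in train

le = LabelEncoder()
le.fit(pd.concat([train["OCCUPATION_TYPE"], test["OCCUPATION_TYPE"]]).fillna("NA"))
train["occupation_type"] = le.transform(train["OCCUPATION_TYPE"].fillna("NA"))
test["occupation_type"] = le.transform(test["OCCUPATION_TYPE"].fillna("NA"))
```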
1,511,034 | train = pd.read_csv(".. /input/train.csv")
test = pd.read_csv('.. /input/test.csv')
test.head(5 )<count_missing_values> | NFOLDS = 5
kf = StratifiedKFold(n_splits=NFOLDS, shuffle=True, random_state=2018)
pred_test_full = 0
params = {
'boosting': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'learning_rate': 0.01,
'num_leaves': 25,
'max_depth': 8,
'colsample_bytree': 0.3,
'seed': 101
}
res = []
idx = 0
for dev_index, val_index in kf.split(train, train['TARGET'].values):
dev, valid = train.loc[dev_index,:], train.loc[val_index,:]
dtrain = lgb.Dataset(dev[predictors].values, label=dev['TARGET'].values,
feature_name=predictors,
categorical_feature=categorical
)
dvalid = lgb.Dataset(valid[predictors].values, label=valid['TARGET'].values,
feature_name=predictors,
categorical_feature=categorical
)
print("Training the model...")
lgb_model = lgb.train(params,
dtrain,
valid_sets=[dtrain, dvalid],
valid_names=['train','valid'],
num_boost_round= 30000,
early_stopping_rounds=500,
verbose_eval=100,
feval=None)
oof = pd.DataFrame()
oof['id'] = valid['SK_ID_CURR'].values
oof['target'] = valid['TARGET'].values
oof['preds'] = lgb_model.predict(valid[predictors],num_iteration=lgb_model.best_iteration)
res.append(oof)
pred_test_full += lgb_model.predict(test[predictors],num_iteration=lgb_model.best_iteration)
sub = pd.read_csv(".. /input/sample_submission.csv")
sub['TARGET'] = pred_test_full/NFOLDS
sub.to_csv("sub_lgb.csv", index=False)
res = pd.concat(res, ignore_index=True)
res.to_csv("oof_lgb.csv", index=False)
print(roc_auc_score(res['target'], res['preds']))
| Home Credit Default Risk |
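Note that `early_stopping_rounds` and `verbose_eval` were removed from `lgb.train` in LightGBM 4; on current versions the equivalent of the training call above passes callbacks instead. A sketch, reusing `params`, `dtrain`, and `dvalid` from the fold loop in this row:

```python
import lightgbm as lgb

lgb_model = lgb.train(
    params,
    dtrain,
    valid_sets=[dtrain, dvalid],
    valid_names=["train", "valid"],
    num_boost_round=30000,
    callbacks=[
        lgb.early_stopping(stopping_rounds=500),  # replaces early_stopping_rounds=500
        lgb.log_evaluation(period=100),           # replaces verbose_eval=100
    ],
)
```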
1,511,034 | train.isnull().any()<count_missing_values> | tmp = install.groupby(['SK_ID_PREV','NUM_INSTALMENT_NUMBER'])['DAYS_INSTALMENT'].count().reset_index()
tmp = tmp[tmp['DAYS_INSTALMENT'] > 1]
tmp.columns = ['SK_ID_PREV','NUM_INSTALMENT_NUMBER','count_dup']
install = install.merge(tmp, on = ['SK_ID_PREV','NUM_INSTALMENT_NUMBER'], how='left')
tmp = install[install['count_dup'] > 1] | Home Credit Default Risk |
1,511,034 | test.isnull().any()<drop_column> | install.drop(['count_dup_x','count_dup_y'], axis=1, inplace = True ) | Home Credit Default Risk |
1,511,034 | test_ids = test['Id']
test = test.drop(['Id'], 1 )<categorify> | tmp.sort_values(by=['SK_ID_PREV','NUM_INSTALMENT_NUMBER','DAYS_ENTRY_PAYMENT'] ) | Home Credit Default Risk |
1,511,034 | le = preprocessing.LabelEncoder()
le.fit(train['class'])
print(list(le.classes_))
train['class'] = le.transform(train['class'] )<drop_column> | tmp['SK_ID_CURR'].value_counts() | Home Credit Default Risk |
1,114,299 | train_ids = train['Id']
train = train.drop(['Id'], 1 )<split> | def get_combined_dataset() :
application_train = pd.read_csv('../input/application_train.csv')
application_test = pd.read_csv('../input/application_test.csv')
application=application_train.append(application_test, ignore_index=True,sort=False)
application.set_index('SK_ID_CURR')  # NOTE: returns a new frame that is discarded; the pipeline relies on SK_ID_CURR staying a column
return(application)
def get_application_dataset() :
df = get_combined_dataset()
filteredColList =['NAME_TYPE_SUITE','NAME_INCOME_TYPE','NAME_EDUCATION_TYPE','NAME_FAMILY_STATUS','NAME_HOUSING_TYPE','OCCUPATION_TYPE',
'WEEKDAY_APPR_PROCESS_START','ORGANIZATION_TYPE','FONDKAPREMONT_MODE']
df = df[[x for x in list(df)if x not in filteredColList]]
oheCols = ['NAME_CONTRACT_TYPE','CODE_GENDER','FLAG_OWN_CAR','FLAG_OWN_REALTY','HOUSETYPE_MODE','WALLSMATERIAL_MODE','EMERGENCYSTATE_MODE']
df.loc[df.CODE_GENDER == 'XNA' ,'CODE_GENDER'] = 'F'
df.loc[(df.DAYS_EMPLOYED > 0),'DAYS_EMPLOYED'] = np.nan
df.loc[(df.REGION_RATING_CLIENT_W_CITY < 0),'REGION_RATING_CLIENT_W_CITY'] = np.nan
df.loc[(df.OBS_30_CNT_SOCIAL_CIRCLE > 10),'OBS_30_CNT_SOCIAL_CIRCLE'] = 10
df.loc[(df.DEF_30_CNT_SOCIAL_CIRCLE > 10),'DEF_30_CNT_SOCIAL_CIRCLE'] = 10
df.loc[(df.OBS_60_CNT_SOCIAL_CIRCLE > 10),'OBS_60_CNT_SOCIAL_CIRCLE'] = 10
df.loc[(df.DEF_60_CNT_SOCIAL_CIRCLE > 10),'DEF_60_CNT_SOCIAL_CIRCLE'] = 10
df.loc[(df.AMT_REQ_CREDIT_BUREAU_QRT > 10),'AMT_REQ_CREDIT_BUREAU_QRT'] = 10
df = pd.get_dummies(df,columns=oheCols)
df['NEW_INCOME2Credit']=df['AMT_CREDIT']/df['AMT_INCOME_TOTAL']
df['NEW_Credit2ANNUITY']=df['AMT_ANNUITY']/df['AMT_CREDIT']
df['NEW_INCOME2ANNUITY']=df['AMT_ANNUITY']/df['AMT_INCOME_TOTAL']
df['NEW_DAYS_EMPLOYED2DAYS_BIRTH'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['NEW_AMT_INCOME_TOTAL2CNT_FAM_MEMBERS'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['NEW_CREDIT_TO_ANNUITY_RATIO'] = df['AMT_CREDIT'] / df['AMT_ANNUITY']
df['NEW_CREDIT2GOODS'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE']
df['NEW_INC_PER_CHLD'] = df['AMT_INCOME_TOTAL'] /(1 + df['CNT_CHILDREN'])
df['NEW_SOURCES_PROD'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']
df['NEW_EXT_SOURCES_MEAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
df['NEW_SCORES_STD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
df['NEW_SCORES_STD'] = df['NEW_SCORES_STD'].fillna(df['NEW_SCORES_STD'].mean())
df['NEW_OWN_CAR_AGE2DAYS_BIRTH'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']
df['NEW_OWN_CAR_AGE2DAYS_EMPLOYED'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['NEW_DAYS_LAST_PHONE_CHANGE2DAYS_BIRTH'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
df['NEW_DAYS_LAST_PHONE_CHANGE2DAYS_EMPLOYED'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
return(df)
def transform_application(df):
logTransformation = ['AMT_INCOME_TOTAL','AMT_CREDIT','AMT_ANNUITY','AMT_GOODS_PRICE','NEW_AMT_INCOME_TOTAL2CNT_FAM_MEMBERS','NEW_INC_PER_CHLD']
df[logTransformation] = df[logTransformation].apply(lambda x : np.log(x+1),axis=1)
sqrtTransformation = ['DAYS_BIRTH','DAYS_EMPLOYED','DAYS_REGISTRATION','DAYS_ID_PUBLISH','OWN_CAR_AGE','DAYS_LAST_PHONE_CHANGE','NEW_INCOME2Credit']
df[sqrtTransformation] = df[sqrtTransformation].apply(lambda x: np.sqrt(np.abs(x)) ,axis=1)
return(df ) | Home Credit Default Risk |
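Two portability notes on `get_combined_dataset` and `transform_application` above: `DataFrame.append` was removed in pandas 2.0, and the row-wise `apply(lambda x: np.log(x + 1), axis=1)` has a vectorized, numerically safer equivalent in `np.log1p`. A minimal sketch (toy frames, illustrative values):

```python
import numpy as np
import pandas as pd

application_train = pd.DataFrame({"SK_ID_CURR": [1, 2], "AMT_CREDIT": [1000.0, 2500.0]})
application_test = pd.DataFrame({"SK_ID_CURR": [3], "AMT_CREDIT": [500.0]})

# pandas >= 2.0 replacement for application_train.append(application_test, ...).
application = pd.concat([application_train, application_test], ignore_index=True, sort=False)

# Vectorized equivalent of .apply(lambda x: np.log(x + 1), axis=1).
application["AMT_CREDIT"] = np.log1p(application["AMT_CREDIT"])
```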
1,114,299 | x_data = train.drop('class',axis=1)
y_labels = train['class']
X_train, X_test, y_train, y_test = train_test_split(x_data, y_labels, test_size=0.3, random_state=101 )<features_selection> | def bureau_balance() :
df = pd.read_csv('../input/bureau_balance.csv')
df1 = df.groupby(['SK_ID_BUREAU'] ).agg(
{'MONTHS_BALANCE': min,
})
df2 = df.groupby(['SK_ID_BUREAU'] ).agg(
{'MONTHS_BALANCE': max,
} ).reset_index()
df2 = pd.merge(df2,df,on=['SK_ID_BUREAU','MONTHS_BALANCE'],how='inner')
df2 = pd.crosstab(df2['SK_ID_BUREAU'], df2['STATUS'])
df = pd.merge(df1,df2,on=['SK_ID_BUREAU'],how='left' ).reset_index()
df.columns = ['SK_ID_BUREAU','MONTHS_BALANCE','BB_S_0','BB_S_1','BB_S_2','BB_S_3','BB_S_4','BB_S_5','BB_S_C','BB_S_X']
return(df)
def get_bureau_dataset() :
b = pd.read_csv('../input/bureau.csv')
bb = bureau_balance()
df = pd.merge(b,bb,on='SK_ID_BUREAU',how='left')
df.loc[(df.DAYS_CREDIT_ENDDATE < 0)|(df.DAYS_CREDIT_ENDDATE > 5000),'DAYS_CREDIT_ENDDATE'] = np.nan
df.loc[(df.DAYS_ENDDATE_FACT < -5000),'DAYS_ENDDATE_FACT'] = np.nan
df.loc[(df.AMT_CREDIT_MAX_OVERDUE > 40000),'AMT_CREDIT_MAX_OVERDUE'] = 40000
df.loc[(df.DAYS_CREDIT_UPDATE < -3000),'DAYS_CREDIT_UPDATE'] = np.nan
df.loc[(df.AMT_CREDIT_SUM_DEBT < 0),'AMT_CREDIT_SUM_DEBT'] = np.nan
df.loc[(df.AMT_CREDIT_SUM_LIMIT < 0),'AMT_CREDIT_SUM_LIMIT'] = np.nan
All = df.groupby(['SK_ID_CURR'] ).agg(
{'DAYS_CREDIT': [min, max],
'CREDIT_DAY_OVERDUE':max,
'DAYS_CREDIT_ENDDATE':max,
'DAYS_ENDDATE_FACT':[min,max],
'AMT_CREDIT_MAX_OVERDUE':max,
'CNT_CREDIT_PROLONG':max,
'AMT_CREDIT_SUM':max,
'AMT_CREDIT_SUM_DEBT':max,
'AMT_CREDIT_SUM_LIMIT':max,
'DAYS_CREDIT_UPDATE':min,
'AMT_ANNUITY':max,
'MONTHS_BALANCE':min,
'BB_S_0':sum,
'BB_S_1':sum,
'BB_S_2':sum,
'BB_S_3':sum,
'BB_S_4':sum,
'BB_S_5':sum,
'BB_S_C':sum,
'BB_S_X':sum
})
All.columns = ["_all_".join(x)for x in All.columns.ravel() ]
Active = df.query('CREDIT_ACTIVE == "Active"' ).groupby(['SK_ID_CURR'] ).agg(
{'CREDIT_DAY_OVERDUE':max,
'AMT_CREDIT_MAX_OVERDUE': max,
'CNT_CREDIT_PROLONG':[max,sum],
'AMT_CREDIT_SUM':sum,
'AMT_CREDIT_SUM_DEBT':sum,
'AMT_CREDIT_SUM_LIMIT':sum,
'AMT_CREDIT_SUM_OVERDUE':sum,
'DAYS_CREDIT_UPDATE':min,
'AMT_ANNUITY':sum,
'MONTHS_BALANCE':min,
'BB_S_0':sum,
'BB_S_1':sum,
'BB_S_2':sum,
'BB_S_3':sum,
'BB_S_4':sum,
'BB_S_5':sum,
'BB_S_C':sum,
'BB_S_X':sum
})
Active.columns = ["_act_".join(x)for x in Active.columns.ravel() ]
CREDIT_ACTIVE_ctab = pd.crosstab(df['SK_ID_CURR'], df['CREDIT_ACTIVE'] ).rename_axis(None, axis=1)
dfs = [All,Active,CREDIT_ACTIVE_ctab]
df_final = reduce(lambda left,right: pd.merge(left,right,on='SK_ID_CURR',how='outer'), dfs)
df_final.reset_index(inplace=True)
return(df_final)
def bureau_newFeature(df):
df['AMT_CREDIT_SUM_sum2AMT_CREDIT_SUM_DEBT_sum'] = df['AMT_CREDIT_SUM_DEBT_act_sum']/df['AMT_CREDIT_SUM_act_sum']
df['AMT_CREDIT_SUM_sum2AMT_ANNUITY_sum'] = df['AMT_CREDIT_SUM_act_sum']/df['AMT_ANNUITY_act_sum']
df['AMT_CREDIT_SUM_DEBT_sum2AMT_ANNUITY_sum'] = df['AMT_CREDIT_SUM_DEBT_act_sum']/df['AMT_ANNUITY_act_sum']
df.replace([np.inf, -np.inf], np.nan,inplace=True)
df.loc[df.AMT_CREDIT_SUM_sum2AMT_CREDIT_SUM_DEBT_sum>2,'AMT_CREDIT_SUM_sum2AMT_CREDIT_SUM_DEBT_sum'] = np.nan
df.loc[df.AMT_CREDIT_SUM_sum2AMT_ANNUITY_sum>120,'AMT_CREDIT_SUM_sum2AMT_ANNUITY_sum'] = np.nan
df.loc[df.AMT_CREDIT_SUM_DEBT_sum2AMT_ANNUITY_sum>80,'AMT_CREDIT_SUM_DEBT_sum2AMT_ANNUITY_sum'] = np.nan
return(df)
def transform_bureau(df):
logTransformation = ['CREDIT_DAY_OVERDUE_all_max','AMT_CREDIT_MAX_OVERDUE_all_max','AMT_CREDIT_SUM_all_max','AMT_CREDIT_SUM_DEBT_all_max',
'AMT_CREDIT_SUM_LIMIT_all_max','AMT_ANNUITY_all_max','AMT_CREDIT_MAX_OVERDUE_act_max','AMT_CREDIT_SUM_act_sum',
'AMT_CREDIT_SUM_DEBT_act_sum','AMT_CREDIT_SUM_LIMIT_act_sum','AMT_CREDIT_SUM_OVERDUE_act_sum','AMT_ANNUITY_act_sum']
df[logTransformation] = df[logTransformation].apply(lambda x : np.log(x+1),axis=1)
sartLogTransformation = ['CREDIT_DAY_OVERDUE_act_max','DAYS_CREDIT_UPDATE_act_min']
df[sartLogTransformation] = df[sartLogTransformation].apply(lambda x : np.sqrt(np.log(np.abs(x+1))),axis=1)
sqrtTransformation = ['DAYS_CREDIT_all_min','DAYS_CREDIT_all_max','DAYS_CREDIT_ENDDATE_all_max','DAYS_ENDDATE_FACT_all_min','DAYS_ENDDATE_FACT_all_max',
'DAYS_CREDIT_UPDATE_all_min','MONTHS_BALANCE_all_min','MONTHS_BALANCE_act_min']
df[sqrtTransformation] = df[sqrtTransformation].apply(lambda x: np.sqrt(np.abs(x)) ,axis=1)
return(df ) | Home Credit Default Risk |
1,114,299 | buying = tf.feature_column.categorical_column_with_vocabulary_list("buying", ['high', 'low', 'med', 'vhigh'])
maintainence = tf.feature_column.categorical_column_with_vocabulary_list("maintainence", ['high', 'low', 'med', 'vhigh'])
doors = tf.feature_column.categorical_column_with_vocabulary_list("doors", ['3', '4', '5more', '2'])
persons = tf.feature_column.categorical_column_with_vocabulary_list("persons", ['4', 'more', '2'])
lug_boot = tf.feature_column.categorical_column_with_vocabulary_list("lug_boot", ['small', 'med', 'big'])
safety = tf.feature_column.categorical_column_with_vocabulary_list("safety", ['low', 'med', 'high'] )<define_variables> | def get_previous_application() :
df = pd.read_csv('../input/previous_application.csv')
df.loc[df.DAYS_FIRST_DRAWING >0,'DAYS_FIRST_DRAWING'] = np.nan
df.loc[df.DAYS_FIRST_DUE >0,'DAYS_FIRST_DUE'] = np.nan
df.loc[df.DAYS_LAST_DUE_1ST_VERSION >2000,'DAYS_LAST_DUE_1ST_VERSION'] = np.nan
df.loc[df.DAYS_LAST_DUE >3000,'DAYS_LAST_DUE'] = np.nan
df.loc[df.DAYS_TERMINATION >3000,'DAYS_TERMINATION'] = np.nan
dff = df.query('FLAG_LAST_APPL_PER_CONTRACT == "Y" and NFLAG_LAST_APPL_IN_DAY == 1')
df['AMT_ANNUITY2AMT_CREDIT'] = df['AMT_ANNUITY']/df['AMT_CREDIT']
df['AMT_APPLICATION2AMT_CREDIT'] = df['AMT_APPLICATION']/df['AMT_CREDIT']
df['AMT_GOODS_PRICE2AMT_CREDIT'] = df['AMT_GOODS_PRICE']/df['AMT_CREDIT']
NAME_CONTRACT_STATUS_ctab = pd.crosstab(df['SK_ID_CURR'], df['NAME_CONTRACT_STATUS'])
df_grouped = df.query('NAME_CONTRACT_STATUS != "Refused" and FLAG_LAST_APPL_PER_CONTRACT == "Y" and NFLAG_LAST_APPL_IN_DAY == 1')\
.groupby(['SK_ID_CURR'])\
.agg(
{'AMT_ANNUITY':max,
'AMT_APPLICATION':max,
'AMT_CREDIT':max,
'AMT_DOWN_PAYMENT':max,
'AMT_GOODS_PRICE':max,
'RATE_DOWN_PAYMENT':[min, max,'mean'],
'RATE_INTEREST_PRIMARY':[min, max,'mean'],
'RATE_INTEREST_PRIVILEGED':[min, max,'mean'],
'DAYS_DECISION':[min, max,'mean'],
'CNT_PAYMENT':[min, max,'mean'],
'DAYS_FIRST_DRAWING':min,
'DAYS_FIRST_DUE':[min, max],
'DAYS_LAST_DUE_1ST_VERSION':[min, max],
'DAYS_LAST_DUE':[min, max],
'DAYS_TERMINATION':[min, max],
'NFLAG_INSURED_ON_APPROVAL':sum
})
df_final = pd.merge(df_grouped,NAME_CONTRACT_STATUS_ctab,on='SK_ID_CURR',how='outer')
df_final.reset_index(inplace=True)
df_final.columns = ['SK_ID_CURR','AMT_ANNUITY_max','AMT_APPLICATION_max','AMT_CREDIT_max','AMT_DOWN_PAYMENT_max','AMT_GOODS_PRICE_max',
'RATE_DOWN_PAYMENT_min','RATE_DOWN_PAYMENT_max','RATE_INTEREST_PRIMARY_min','RATE_INTEREST_PRIMARY_max',
'RATE_INTEREST_PRIVILEGED_min','RATE_INTEREST_PRIVILEGED_max','DAYS_DECISION_min','DAYS_DECISION_max','CNT_PAYMENT_min',
'CNT_PAYMENT_max','DAYS_FIRST_DRAWING_min','DAYS_FIRST_DUE_min','DAYS_FIRST_DUE_max',
'DAYS_LAST_DUE_1ST_VERSION_min','DAYS_LAST_DUE_1ST_VERSION_max','DAYS_LAST_DUE_min','DAYS_LAST_DUE_max','DAYS_TERMINATION_min',
'DAYS_TERMINATION_max','NFLAG_INSURED_ON_APPROVAL_sum','Approved','Canceled','Refused','Unused_offer']
df_final.head()
return(df_final)
def transform_previous_application(df):
logTransformation = ['AMT_ANNUITY_max','AMT_APPLICATION_max','AMT_CREDIT_max', 'AMT_DOWN_PAYMENT_max','AMT_GOODS_PRICE_max']
df[logTransformation] = df[logTransformation].apply(lambda x : np.log(x+1),axis=1)
sqrtTransformation = ['DAYS_DECISION_min','DAYS_DECISION_max','DAYS_FIRST_DRAWING_min','DAYS_FIRST_DUE_min','DAYS_FIRST_DUE_max','DAYS_LAST_DUE_min',
'DAYS_LAST_DUE_max','DAYS_TERMINATION_min','DAYS_TERMINATION_max']
df[sqrtTransformation] = df[sqrtTransformation].apply(lambda x: np.sqrt(np.abs(x)) ,axis=1)
return(df ) | Home Credit Default Risk |
1,114,299 | feat_cols = [buying, maintainence, doors, persons, lug_boot, safety]<train_model> | def get_previous_application() :
df = pd.read_csv('../input/previous_application.csv')
df.loc[df.DAYS_FIRST_DRAWING >0,'DAYS_FIRST_DRAWING'] = np.nan
df.loc[df.DAYS_FIRST_DUE >0,'DAYS_FIRST_DUE'] = np.nan
df.loc[df.DAYS_LAST_DUE_1ST_VERSION >2000,'DAYS_LAST_DUE_1ST_VERSION'] = np.nan
df.loc[df.DAYS_LAST_DUE >3000,'DAYS_LAST_DUE'] = np.nan
df.loc[df.DAYS_TERMINATION >3000,'DAYS_TERMINATION'] = np.nan
df['AMT_ANNUITY2AMT_CREDIT'] = df['AMT_ANNUITY']/df['AMT_CREDIT']
df['AMT_APPLICATION2AMT_CREDIT'] = df['AMT_APPLICATION']/df['AMT_CREDIT']
df['AMT_GOODS_PRICE2AMT_CREDIT'] = df['AMT_GOODS_PRICE']/df['AMT_CREDIT']
NAME_CONTRACT_STATUS_ctab = pd.crosstab(df['SK_ID_CURR'], df['NAME_CONTRACT_STATUS'])
df_grouped = df.query('NAME_CONTRACT_STATUS != "Refused" and FLAG_LAST_APPL_PER_CONTRACT == "Y" and NFLAG_LAST_APPL_IN_DAY == 1')\
.groupby(['SK_ID_CURR'])\
.agg(
{'AMT_ANNUITY':max,
'AMT_APPLICATION':max,
'AMT_CREDIT':max,
'AMT_DOWN_PAYMENT':max,
'AMT_GOODS_PRICE':max,
'RATE_DOWN_PAYMENT':[min, max,'mean'],
'RATE_INTEREST_PRIMARY':[min, max,'mean'],
'RATE_INTEREST_PRIVILEGED':[min, max,'mean'],
'DAYS_DECISION':[min, max,'mean'],
'CNT_PAYMENT':[min, max,'mean'],
'DAYS_FIRST_DRAWING':min,
'DAYS_FIRST_DUE':[min, max],
'DAYS_LAST_DUE_1ST_VERSION':[min, max],
'DAYS_LAST_DUE':[min, max],
'DAYS_TERMINATION':[min, max],
'NFLAG_INSURED_ON_APPROVAL':sum
})
df_final = pd.merge(df_grouped,NAME_CONTRACT_STATUS_ctab,on='SK_ID_CURR',how='outer')
df_final.reset_index(inplace=True)
return(df_final)
def transform_previous_application(df):
logTransformation = ['AMT_ANNUITY_max','AMT_APPLICATION_max','AMT_CREDIT_max', 'AMT_DOWN_PAYMENT_max','AMT_GOODS_PRICE_max']
df[logTransformation] = df[logTransformation].apply(lambda x : np.log(x+1),axis=1)
sqrtTransformation = ['DAYS_DECISION_min','DAYS_DECISION_max','DAYS_FIRST_DRAWING_min','DAYS_FIRST_DUE_min','DAYS_FIRST_DUE_max','DAYS_LAST_DUE_min',
'DAYS_LAST_DUE_max','DAYS_TERMINATION_min','DAYS_TERMINATION_max']
df[sqrtTransformation] = df[sqrtTransformation].apply(lambda x: np.sqrt(np.abs(x)) ,axis=1)
return(df ) | Home Credit Default Risk |
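The multi-function `agg` dicts in these kernels return a MultiIndex that is then flattened by hand (the `"_".join(...)` idiom and the long explicit column lists). pandas 0.25+ named aggregation produces flat, pre-named columns directly; a minimal sketch on a toy frame:

```python
import pandas as pd

prev = pd.DataFrame({
    "SK_ID_CURR": [1, 1, 2],
    "AMT_ANNUITY": [100.0, 150.0, 80.0],
    "DAYS_DECISION": [-10, -400, -30],
})

# Flat, named output without manual column renaming.
grouped = prev.groupby("SK_ID_CURR").agg(
    AMT_ANNUITY_max=("AMT_ANNUITY", "max"),
    DAYS_DECISION_min=("DAYS_DECISION", "min"),
    DAYS_DECISION_mean=("DAYS_DECISION", "mean"),
).reset_index()
print(grouped)
```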
1,114,299 | input_func = tf.estimator.inputs.pandas_input_fn(x=X_train,
y=y_train,
batch_size=500,
num_epochs=10000,
shuffle=True )<train_model> | def get_POS_CASH_balance() :
POS_CASH_balance = pd.read_csv('../input/POS_CASH_balance.csv')
Closed_Loans = POS_CASH_balance[POS_CASH_balance['SK_ID_PREV'].isin(POS_CASH_balance.query('NAME_CONTRACT_STATUS == "Completed"' ).SK_ID_PREV)]
Active_Loans = POS_CASH_balance[~POS_CASH_balance['SK_ID_PREV'].isin(POS_CASH_balance.query('NAME_CONTRACT_STATUS == "Active" and MONTHS_BALANCE == -1' ).SK_ID_PREV)]
Active = Active_Loans.groupby(['SK_ID_CURR'] ).agg(
{ 'MONTHS_BALANCE':min,
'CNT_INSTALMENT':[min,max],
'CNT_INSTALMENT_FUTURE':[min,max]
})
Closed = Closed_Loans.groupby(['SK_ID_CURR'] ).agg(
{ 'MONTHS_BALANCE':[min,max],
'CNT_INSTALMENT':max
})
NAME_CONTRACT_STATUS = POS_CASH_balance.query('(NAME_CONTRACT_STATUS == "Completed")or(NAME_CONTRACT_STATUS == "Active" and MONTHS_BALANCE == -1)')[['SK_ID_PREV','SK_ID_CURR','NAME_CONTRACT_STATUS']].drop_duplicates()
NAME_CONTRACT_STATUS_ctab = pd.crosstab(NAME_CONTRACT_STATUS['SK_ID_CURR'], NAME_CONTRACT_STATUS['NAME_CONTRACT_STATUS'])
dfs = [NAME_CONTRACT_STATUS_ctab,Active,Closed]
df_final = reduce(lambda left,right: pd.merge(left,right,on='SK_ID_CURR',how='outer'), dfs)
df_final.reset_index(inplace=True)
df_final.columns = ['SK_ID_CURR','Active','Completed','MONTHS_BALANCE_A_min','CNT_INSTALMENT_A_min','CNT_INSTALMENT_A_max','CNT_INSTALMENT_FUTURE_A_min',
'CNT_INSTALMENT_FUTURE_max','MONTHS_BALANCE_C_min','MONTHS_BALANCE_C_max','CNT_INSTALMENT_C_max']
return(df_final)
def transform_POS_CASH_balance(df):
sqrtTransformation = ['CNT_INSTALMENT_A_min','CNT_INSTALMENT_A_max','CNT_INSTALMENT_FUTURE_A_min','CNT_INSTALMENT_FUTURE_max','CNT_INSTALMENT_C_max']
df[sqrtTransformation] = df[sqrtTransformation].apply(lambda x: np.sqrt(np.abs(x)) ,axis=1)
return(df ) | Home Credit Default Risk |
1,114,299 | input_func_test = tf.estimator.inputs.pandas_input_fn(x=test,
num_epochs=500,
shuffle=False )<choose_model_class> | def get_installment_payments() :
instalment_payments = pd.read_csv('../input/installments_payments.csv')
instalment_payments['MONTH']=(instalment_payments['DAYS_INSTALMENT']/30 ).astype(int)
Active = instalment_payments.query('MONTH == -1' ).groupby('SK_ID_CURR' ).agg({
'NUM_INSTALMENT_VERSION':max,
'NUM_INSTALMENT_NUMBER':max,
'AMT_INSTALMENT':sum,
'AMT_PAYMENT':sum
})
Closed = instalment_payments.groupby('SK_ID_CURR' ).agg({
'NUM_INSTALMENT_VERSION':max,
'NUM_INSTALMENT_NUMBER':max,
'DAYS_INSTALMENT':min,
'AMT_INSTALMENT':[max,min]
})
df_final = pd.merge(Active,Closed,on='SK_ID_CURR',how='outer')
df_final.reset_index(inplace=True)
df_final.columns=['SK_ID_CURR','NUM_INSTALMENT_VERSION_A_max','NUM_INSTALMENT_NUMBER_A_max','AMT_INSTALMENT_A_sum','AMT_PAYMENT_A_sum',
'NUM_INSTALMENT_VERSION_C_max','NUM_INSTALMENT_NUMBER_C_max','DAYS_INSTALMENT_C_min','AMT_INSTALMENT_C_max','AMT_INSTALMENT_c_min']
return(df_final)
def transform_installment_payments(df):
logTransformation = ['AMT_INSTALMENT_A_sum','AMT_PAYMENT_A_sum','AMT_INSTALMENT_C_max','AMT_INSTALMENT_c_min']
df[logTransformation] = df[logTransformation].apply(lambda x : np.log(x+1),axis=1)
sqrtTransformation = ['NUM_INSTALMENT_VERSION_A_max','NUM_INSTALMENT_NUMBER_A_max','NUM_INSTALMENT_VERSION_C_max','NUM_INSTALMENT_NUMBER_C_max',
'DAYS_INSTALMENT_C_min']
df[sqrtTransformation] = df[sqrtTransformation].apply(lambda x: np.sqrt(np.abs(x)) ,axis=1)
return(df ) | Home Credit Default Risk |
1,114,299 | model = tf.estimator.LinearClassifier(feature_columns=feat_cols, n_classes=4 )<train_model> | def get_credit_card_balance() :
df = pd.read_csv('../input/credit_card_balance.csv')
dfa = df.query('NAME_CONTRACT_STATUS == "Active"' ).groupby(['SK_ID_CURR','MONTHS_BALANCE'] ).agg({
'AMT_BALANCE':sum,
'AMT_CREDIT_LIMIT_ACTUAL':sum,
'AMT_DRAWINGS_ATM_CURRENT':sum,
'AMT_DRAWINGS_CURRENT':sum,
'AMT_DRAWINGS_OTHER_CURRENT':sum,
'AMT_DRAWINGS_POS_CURRENT':sum,
'AMT_INST_MIN_REGULARITY':sum,
'AMT_PAYMENT_CURRENT':sum,
'AMT_PAYMENT_TOTAL_CURRENT':sum,
'AMT_RECEIVABLE_PRINCIPAL':sum,
'AMT_RECIVABLE':sum,
'AMT_TOTAL_RECEIVABLE':sum,
'CNT_DRAWINGS_ATM_CURRENT':sum,
'CNT_DRAWINGS_CURRENT':sum,
'CNT_DRAWINGS_POS_CURRENT':sum,
'CNT_DRAWINGS_OTHER_CURRENT':sum,
'CNT_INSTALMENT_MATURE_CUM':sum,
'SK_DPD':max,
'SK_DPD_DEF':max
})
dfa = dfa.groupby(['SK_ID_CURR'] ).agg(['mean',max,min,'std'])
dfa.columns = ["_".join(x)for x in dfa.columns.ravel() ]
dfp = pd.pivot_table(df.query('NAME_CONTRACT_STATUS == ["Active","Completed","Demand"]' ).groupby(['SK_ID_CURR','SK_ID_PREV'] ).agg({'MONTHS_BALANCE':max} ).merge(df[['SK_ID_CURR','SK_ID_PREV','MONTHS_BALANCE','NAME_CONTRACT_STATUS']],on=['SK_ID_CURR','SK_ID_PREV','MONTHS_BALANCE']),
values='SK_ID_PREV',index=['SK_ID_CURR'],columns=['NAME_CONTRACT_STATUS'],aggfunc=np.size ).reset_index().rename_axis(None, axis=1)
dfp.fillna(0,inplace=True)
dfcc = dfa.merge(dfp,on='SK_ID_CURR')
dfcc[dfcc < 0] = 0
return(dfcc)
def transform_credit_card_balance(df):
logTransformation = ['AMT_BALANCE_mean','AMT_BALANCE_max','AMT_BALANCE_min','AMT_BALANCE_std',
'AMT_CREDIT_LIMIT_ACTUAL_mean','AMT_CREDIT_LIMIT_ACTUAL_max','AMT_CREDIT_LIMIT_ACTUAL_min','AMT_CREDIT_LIMIT_ACTUAL_std',
'AMT_DRAWINGS_ATM_CURRENT_mean','AMT_DRAWINGS_ATM_CURRENT_max','AMT_DRAWINGS_ATM_CURRENT_min','AMT_DRAWINGS_ATM_CURRENT_std',
'AMT_DRAWINGS_CURRENT_mean','AMT_DRAWINGS_CURRENT_max','AMT_DRAWINGS_CURRENT_min','AMT_DRAWINGS_CURRENT_std',
'AMT_DRAWINGS_OTHER_CURRENT_mean','AMT_DRAWINGS_OTHER_CURRENT_max','AMT_DRAWINGS_OTHER_CURRENT_std','AMT_DRAWINGS_POS_CURRENT_mean',
'AMT_DRAWINGS_POS_CURRENT_max','AMT_DRAWINGS_POS_CURRENT_min','AMT_DRAWINGS_POS_CURRENT_std','AMT_INST_MIN_REGULARITY_mean',
'AMT_INST_MIN_REGULARITY_max','AMT_INST_MIN_REGULARITY_min','AMT_INST_MIN_REGULARITY_std','AMT_PAYMENT_CURRENT_mean',
'AMT_PAYMENT_CURRENT_max','AMT_PAYMENT_CURRENT_min','AMT_PAYMENT_CURRENT_std','AMT_PAYMENT_TOTAL_CURRENT_mean','AMT_PAYMENT_TOTAL_CURRENT_max',
'AMT_PAYMENT_TOTAL_CURRENT_min','AMT_PAYMENT_TOTAL_CURRENT_std','AMT_RECEIVABLE_PRINCIPAL_mean','AMT_RECEIVABLE_PRINCIPAL_max','AMT_RECEIVABLE_PRINCIPAL_min',
'AMT_RECEIVABLE_PRINCIPAL_std','AMT_RECIVABLE_mean','AMT_RECIVABLE_max','AMT_RECIVABLE_min','AMT_RECIVABLE_std','AMT_TOTAL_RECEIVABLE_mean',
'AMT_TOTAL_RECEIVABLE_max','AMT_TOTAL_RECEIVABLE_min','AMT_TOTAL_RECEIVABLE_std']
df[logTransformation] = df[logTransformation].apply(lambda x : np.log(x+1),axis=1)
sqrtTransformation = ['CNT_DRAWINGS_ATM_CURRENT_mean','CNT_DRAWINGS_ATM_CURRENT_max','CNT_DRAWINGS_ATM_CURRENT_min',
'CNT_DRAWINGS_ATM_CURRENT_std','CNT_DRAWINGS_CURRENT_mean','CNT_DRAWINGS_CURRENT_max','CNT_DRAWINGS_CURRENT_min',
'CNT_DRAWINGS_CURRENT_std','CNT_DRAWINGS_POS_CURRENT_mean','CNT_DRAWINGS_POS_CURRENT_max','CNT_DRAWINGS_POS_CURRENT_min',
'CNT_DRAWINGS_POS_CURRENT_std','CNT_INSTALMENT_MATURE_CUM_mean','CNT_INSTALMENT_MATURE_CUM_max','CNT_INSTALMENT_MATURE_CUM_min',
'CNT_INSTALMENT_MATURE_CUM_std','SK_DPD_mean','SK_DPD_std','SK_DPD_DEF_mean','SK_DPD_DEF_max','SK_DPD_DEF_std']
df[sqrtTransformation] = df[sqrtTransformation].apply(lambda x: np.sqrt(np.abs(x)) ,axis=1)
return(df ) | Home Credit Default Risk |
1,114,299 | model.train(input_fn=input_func,steps=5000 )<train_model> | def getFinalDataSet() :
application = transform_application(get_application_dataset())
bureau = transform_bureau(get_bureau_dataset())
previous_application = get_previous_application()
POS_CASH_balance = transform_POS_CASH_balance(get_POS_CASH_balance())
installment_payments = transform_installment_payments(get_installment_payments())
credit_card_balance = transform_credit_card_balance(get_credit_card_balance())
dfs = [application, bureau, previous_application, POS_CASH_balance, installment_payments, credit_card_balance]
df = reduce(lambda left,right: pd.merge(left,right,on='SK_ID_CURR',how='left'), dfs)
return(df ) | Home Credit Default Risk |
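`getFinalDataSet` (and the bureau/POS helpers above) fold the per-table frames together with `reduce`, which on Python 3 requires `from functools import reduce`. A minimal sketch of the same left-merge chain on toy frames:

```python
from functools import reduce

import pandas as pd

a = pd.DataFrame({"SK_ID_CURR": [1, 2], "f1": [0.1, 0.2]})
b = pd.DataFrame({"SK_ID_CURR": [1], "f2": [7.0]})
c = pd.DataFrame({"SK_ID_CURR": [2], "f3": [3.0]})

# Successive left joins on the key column, as in getFinalDataSet.
df = reduce(lambda left, right: pd.merge(left, right, on="SK_ID_CURR", how="left"), [a, b, c])
print(df)
```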
1,114,299 | pred_fn = tf.estimator.inputs.pandas_input_fn(x=X_test, batch_size=len(X_test), shuffle=False )<predict_on_test> | def scaleNfillna(df):
df.replace([np.inf, -np.inf], np.nan,inplace=True)
df.fillna(0,inplace=True)
scaler = MinMaxScaler()
df = scaler.fit_transform(df)
return(df ) | Home Credit Default Risk |
1,114,299 | predictions = list(model.predict(input_fn=pred_fn))
probs = pd.Series([pred['class_ids'][0] for pred in predictions] )<compute_test_metric> | from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, BatchNormalization
from sklearn.model_selection import train_test_split
from sklearn import metrics | Home Credit Default Risk |
1,114,299 | final_preds = []
for pred in predictions:
final_preds.append(pred['class_ids'][0])
print(classification_report(y_test,final_preds))<train_model> | def kfold_lightgbm(df, num_folds, stratified = False, debug= False):
train_df = df[df['TARGET'].notnull() ]
test_df = df[df['TARGET'].isnull() ]
print("Starting LightGBM.Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
del df
gc.collect()
if stratified:
folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001)
else:
folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001)
oof_preds = np.zeros(train_df.shape[0])
sub_preds = np.zeros(test_df.shape[0])
feature_importance_df = pd.DataFrame()
feats = [f for f in train_df.columns if f not in ['SK_ID_CURR','TARGET']]
for n_fold,(train_idx, valid_idx)in enumerate(folds.split(train_df[feats], train_df['TARGET'])) :
train_x, train_y= train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
clf = LGBMClassifier(
nthread=4,
n_estimators=10000,
learning_rate=0.02,
num_leaves=34,
colsample_bytree=0.9497036,
subsample=0.8715623,
max_depth=8,
reg_alpha=0.041545473,
reg_lambda=0.0735294,
min_split_gain=0.0222415,
min_child_weight=39.3259775,
silent=-1,
verbose=-1,
)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)],
eval_metric= 'auc', verbose= 100, early_stopping_rounds= 200)
oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]
sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
print('Fold %2d AUC : %.6f' %(n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
del clf, train_x, train_y, valid_x, valid_y
gc.collect()
if not debug:
test_df['TARGET'] = sub_preds
print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))
display_importances(feature_importance_df)
return(test_df[['SK_ID_CURR', 'TARGET']])
def display_importances(feature_importance_df_):
cols = feature_importance_df_[["feature", "importance"]].groupby("feature" ).mean().sort_values(by="importance", ascending=False)[:40].index
best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
plt.figure(figsize=(8, 10))
sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False))
plt.title('LightGBM Features(avg over folds)')
plt.tight_layout()
plt.savefig('lgbm_importances01.png' ) | Home Credit Default Risk |
1,114,299 | eval_input_func = tf.estimator.inputs.pandas_input_fn(x=X_test,y=y_test,batch_size=len(X_test),shuffle=False )<prepare_output> | def ANN(X_train,y_train,X_test,y_test,L_dim,num_epochs = 2):
ann = Sequential()
ann.add(Dense(L_dim[0], input_dim=X_train.shape[1], activation='relu'))
ann.add(BatchNormalization())
ann.add(Dropout(0.2))
ann.add(Dense(L_dim[1], activation='relu'))
ann.add(BatchNormalization())
ann.add(Dropout(0.2))
ann.add(Dense(L_dim[2], activation='relu'))
ann.add(BatchNormalization())
ann.add(Dropout(0.2))
ann.add(Dense(L_dim[3], activation='relu'))
ann.add(BatchNormalization())
ann.add(Dropout(0.2))
ann.add(Dense(L_dim[4], activation='relu'))
ann.add(BatchNormalization())
ann.add(Dense(1, activation='sigmoid'))
ann.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
ann.summary()
ann.fit(X_train,
y_train,
epochs=num_epochs,
batch_size=32,
validation_data=(X_test,y_test),
shuffle=True,
verbose=1)
y_pred = ann.predict(X_test)
cm = metrics.confusion_matrix(y_test, y_pred > 0.5)
print(cm)
fpr, tpr, thresholds = metrics.roc_curve(y_test+1, y_pred, pos_label=2)
print(metrics.auc(fpr, tpr))
return(ann ) | Home Credit Default Risk |
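The `roc_curve(y_test + 1, y_pred, pos_label=2)` / `metrics.auc` pair at the end of `ANN` shifts the labels so that the positive class is 2; for 0/1 labels a single call gives the same number. A minimal sketch:

```python
import numpy as np
from sklearn import metrics

y_test = np.array([0, 1, 1, 0])
y_pred = np.array([0.1, 0.8, 0.4, 0.3])  # predicted probabilities

# Equivalent to metrics.auc(*metrics.roc_curve(y_test + 1, y_pred, pos_label=2)[:2]) here.
print(metrics.roc_auc_score(y_test, y_pred))
```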
1,114,299 | results = model.evaluate(eval_input_func)
results<train_model> | def AE(X):
input_data = Input(shape=(X.shape[1],))
encoded = Dense(128, activation='relu' )(input_data)
encoded = BatchNormalization()(encoded)
encoded = Dense(32, activation='relu' )(encoded)
encoded = BatchNormalization()(encoded)
encoded = Dense(16, activation='relu' )(encoded)
encoded = BatchNormalization(name='encoded_layer' )(encoded)
decoded = Dense(32, activation='relu' )(encoded)
decoded = BatchNormalization()(decoded)
decoded = Dense(64, activation='relu' )(decoded)
decoded = BatchNormalization()(decoded)
decoded = Dense(X.shape[1], activation='linear')(decoded)  # take the decoder stack; '(encoded)' here would bypass the two layers above
autoencoder = Model(input_data, decoded)
autoencoder.compile(optimizer='adadelta', loss='mean_squared_error')
autoencoder.fit(X, X,epochs=10, batch_size=32,shuffle=True)
return(autoencoder ) | Home Credit Default Risk |
1,114,299 | pred_fn_test = tf.estimator.inputs.pandas_input_fn(x=test, batch_size=len(test), shuffle=False )<predict_on_test> | def submitLGBM(debug=True):
df = getFinalDataSet()
submission = kfold_lightgbm(df, 4, stratified = True)
if not debug:
print("writing the submission file")
submission.to_csv('submission_1.csv', index=False ) | Home Credit Default Risk |
1,114,299 | predictions_test = list(model.predict(input_fn=pred_fn_test))
probs_test = pd.Series([pred['class_ids'][0] for pred in predictions_test] )<define_variables> | def ANN_prediction(df):
feats =[x for x in list(df)if x not in ['SK_ID_CURR','TARGET']]
df[feats] = scaleNfillna(df[feats])
X = df.loc[df['TARGET'].notnull() ,feats].values
y = df[df['TARGET'].notnull() ].TARGET.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42)
ros = RandomOverSampler(random_state=0, sampling_strategy=0.4)
X_resampled, y_resampled = ros.fit_resample(X_train, y_train)  # fit_sample in pre-0.4 imbalanced-learn
L_dim =(128,64,32,16,8)
ann = ANN(X_resampled, y_resampled,X_test,y_test,L_dim,5)
annPred = pd.DataFrame()
annPred['SK_ID_CURR'] = df['SK_ID_CURR']
annPred['annPred'] = ann.predict(df[feats].values)
return(annPred)
def AE_prediction(df):
feats =[x for x in list(df)if x not in ['SK_ID_CURR','TARGET']]
df.loc[:,feats] = scaleNfillna(df.loc[:,feats])
X = df[feats].values
ae = AE(X)
intermediate_layer_model = Model(inputs=ae.input, outputs=ae.get_layer('encoded_layer' ).output)
aePred = pd.DataFrame(columns=['SK_ID_CURR']+['ae'+str(x)for x in range(1,17)])
aePred['SK_ID_CURR'] = df['SK_ID_CURR']
aePred.loc[:,'ae1':'ae16'] = intermediate_layer_model.predict(X)
return(aePred)
def submit() :
df = getFinalDataSet()
annPred = ANN_prediction(df)
df = pd.merge(df,annPred,on='SK_ID_CURR',how='left')
submission = kfold_lightgbm(df, 4, stratified = True)
print("writing the submission file")
submission.to_csv('submission_1.csv', index=False)
def submitWeighted() :
df = getFinalDataSet()
annPred = ANN_prediction(df)
lgbPred = kfold_lightgbm(df, 4, stratified = True)
weighted = pd.merge(lgbPred,annPred,on='SK_ID_CURR',how='left')
weighted['TARGET_avg'] =(0.8)*weighted['TARGET'] +(0.2)*weighted['annPred']
submission = weighted.loc[:,['SK_ID_CURR','TARGET_avg']]
submission.rename(columns={'TARGET_avg': 'TARGET'}, inplace=True)
submission.to_csv('submission_1.csv', index=False ) | Home Credit Default Risk |
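`ANN_prediction` above relies on `RandomOverSampler` from imbalanced-learn (the import is not shown in the cell); current releases spell the resampling call `fit_resample` rather than the old `fit_sample`. A self-contained sketch of the same oversampling step:

```python
import numpy as np
from imblearn.over_sampling import RandomOverSampler

X = np.arange(20).reshape(10, 2)
y = np.array([0] * 8 + [1] * 2)  # imbalanced binary target

ros = RandomOverSampler(random_state=0, sampling_strategy=0.4)
X_res, y_res = ros.fit_resample(X, y)  # fit_sample in old imbalanced-learn versions
print(np.bincount(y_res))  # minority oversampled to ~0.4x the majority count
```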
1,114,299 | preds_test_1 = []
for pred in predictions_test:
preds_test_1.append(pred['class_ids'][0])
print(len(preds_test_1))<categorify> | def ilo() :
df = getFinalDataSet()
feats =[x for x in list(df)if x not in ['SK_ID_CURR','TARGET']]
df[feats] = scaleNfillna(df[feats])
X = df[feats].values
ae = AE(X)
intermediate_layer_model = Model(inputs=ae.input, outputs=ae.get_layer('encoded_layer' ).output)
X = intermediate_layer_model.predict(X[df["TARGET"].notnull() ])
y = df[df['TARGET'].notnull() ].TARGET.values
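# NOTE: kfold_lightgbm is defined above as kfold_lightgbm(df, num_folds, stratified, debug),
# so the five-argument call below (X, y, 2, False, False) raises a TypeError as written.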
kfold_lightgbm(X,y, 2, False, False ) | Home Credit Default Risk |
1,414,913 | preds_test = le.inverse_transform(preds_test_1)
print(type(preds_test))<create_dataframe> | data = pd.read_csv(PATH+"/application_train.csv")
test = pd.read_csv(PATH+"/application_test.csv")
bureau = pd.read_csv(PATH+"/bureau.csv", nrows=50000)
bureau_balance = pd.read_csv(PATH+"/bureau_balance.csv", nrows=50000)
credit_card_balance = pd.read_csv(PATH+"/credit_card_balance.csv", nrows=50000)
installments_payments = pd.read_csv(PATH+"/installments_payments.csv", nrows=50000)
previous_application = pd.read_csv(PATH+"/previous_application.csv", nrows=50000)
POS_CASH_balance = pd.read_csv(PATH+"/POS_CASH_balance.csv", nrows=50000 ) | Home Credit Default Risk |
1,414,913 | df = pd.DataFrame(columns=['Id', 'Class_vgood', 'Class_good', 'Class_acc', 'Class_unacc'])
for i, ids, preds in zip(range(len(test_ids)) , test_ids, preds_test):
if(preds == 'vgood'):
submission = pd.DataFrame({
"Id": ids,
"Class_vgood": 1,
"Class_good": 0,
"Class_acc": 0,
"Class_unacc": 0,
}, index=[i])
df = df.append(submission)
if(preds == 'good'):
submission = pd.DataFrame({
"Id": ids,
"Class_vgood": 0,
"Class_good": 1,
"Class_acc": 0,
"Class_unacc": 0,
}, index=[i])
df = df.append(submission)
if(preds == 'acc'):
submission = pd.DataFrame({
"Id": ids,
"Class_vgood": 0,
"Class_good": 0,
"Class_acc": 1,
"Class_unacc": 0,
}, index=[i])
df = df.append(submission)
if(preds == 'unacc'):
submission = pd.DataFrame({
"Id": ids,
"Class_vgood": 0,
"Class_good": 0,
"Class_acc": 0,
"Class_unacc": 1,
}, index=[i])
df = df.append(submission )<save_to_csv> | import numpy as np | Home Credit Default Risk |
1,414,913 | df.to_csv('sampleSubmission.csv', index=False )<load_from_csv> | import pandas as pd | Home Credit Default Risk |
1,414,913 | train_df = pd.read_csv(".. /input/tmu-inclass-competition/train.csv")
test_df = pd.read_csv(".. /input/tmu-inclass-competition/test.csv")
sub_df = pd.read_csv(".. /input/tmu-inclass-competition/sample_submission.csv" )<count_values> | import pandas as pd | Home Credit Default Risk |
1,414,913 | cat_list = ["jurisdiction_names", "country_code", "smart_location", "property_type", "host_id", "host_response_time", "room_type"]
def preprocess(train_df, test_df):
new_df = pd.concat([train_df, test_df] ).reset_index(drop=True)
d = {}
for s in new_df["calendar_updated"].value_counts().index:
if s == "today":
d[s] = 0
elif s == "yesterday":
d[s] = 1
elif s == "a week ago" or s == "1 week ago":
d[s] = 7
elif s == "never":
d[s] = 9999
elif s[-len("months ago"):] == "months ago":
d[s] = int(s[:-len("months ago")])* 30
elif s[-len("weeks ago"):] == "weeks ago":
d[s] = int(s[:-len("weeks ago")])* 7
elif s[-len("days ago"):] == "days ago":
d[s] = int(s[:-len("days ago")])
else:
print(s)
print("Error")
break
oldest = min(pd.to_datetime(new_df["host_since"]))
newest = max(pd.to_datetime(new_df["host_since"]))
dt = newest - oldest
processed_train_df = train_df.select_dtypes("number" ).drop("listing_id", axis=1)
processed_train_df["host_since"] = pd.to_datetime(train_df["host_since"])
processed_train_df["host_since"] = processed_train_df["host_since"].apply(lambda x:(x - oldest)/dt * 100)
processed_train_df["calendar_updated"] = train_df["calendar_updated"].apply(lambda x: d[x])
processed_train_df["host_response_rate"] = train_df["host_response_rate"].fillna("0%" ).apply(lambda x: int(x[:-1]))
processed_train_df = pd.concat([processed_train_df, pd.get_dummies(train_df["bed_type"])], axis=1, join='inner')
processed_train_df = pd.concat([processed_train_df, pd.get_dummies(train_df["cancellation_policy"])], axis=1, join='inner')
processed_test_df = test_df.select_dtypes("number" ).drop("listing_id", axis=1)
processed_test_df["host_since"] = pd.to_datetime(test_df["host_since"])
processed_test_df["host_since"] = processed_test_df["host_since"].apply(lambda x:(x - oldest)/dt * 100)
processed_test_df["calendar_updated"] = test_df["calendar_updated"].apply(lambda x: d[x])
processed_test_df["host_response_rate"] = test_df["host_response_rate"].fillna("0%" ).apply(lambda x: int(x[:-1]))
processed_test_df = pd.concat([processed_test_df, pd.get_dummies(test_df["bed_type"])], axis=1, join='inner')
processed_test_df = pd.concat([processed_test_df, pd.get_dummies(test_df["cancellation_policy"])], axis=1, join='inner')
for col in ["host_is_superhost", "host_has_profile_pic", "host_identity_verified", "is_location_exact", "has_availability", "requires_license"\
, "instant_bookable", "is_business_travel_ready", "require_guest_profile_picture", "require_guest_phone_verification"]:
processed_train_df[col] = train_df[col].apply(lambda x: 1 if x == "t" else 0)
processed_test_df[col] = test_df[col].apply(lambda x: 1 if x == "t" else 0)
train_df["host_verifications"] = train_df["host_verifications"].apply(lambda x: x.replace("]", "" ).replace("[", "" ).replace("'", "" ).replace(",", ""))
test_df["host_verifications"] = test_df["host_verifications"].apply(lambda x: x.replace("]", "" ).replace("[", "" ).replace("'", "" ).replace(",", ""))
vers = ["email", "phone", "facebook", "google", "weibo", "sent_id" , "reviews", "kba", "jumio", "government_id", "offline_government_id", "selfie", "identity_manual", "sesame", "sesame_offline", "work_email"]
for v in vers:
processed_train_df[v] = train_df["host_verifications"].apply(lambda x: 1 if v in x.split() else 0)
processed_test_df[v] = test_df["host_verifications"].apply(lambda x: 1 if v in x.split() else 0)
for col in cat_list:
le = LabelEncoder()
train_df[col] = train_df[col].fillna("NAN")
test_df[col] = test_df[col].fillna("NAN")
le = le.fit(pd.concat([train_df[col], test_df[col]]))
processed_train_df[col] = le.transform(train_df[col])
processed_test_df[col] = le.transform(test_df[col])
processed_train_df["col1"] = processed_train_df["bedrooms"] / processed_train_df["accommodates"]
processed_train_df["col2"] = processed_train_df["bathrooms"] / processed_train_df["accommodates"]
processed_train_df["col3"] = processed_train_df["beds"] / processed_train_df["bedrooms"]
processed_train_df["col4"] = processed_train_df["bedrooms"] + processed_train_df["bathrooms"]
processed_train_df["col5"] = processed_train_df["bedrooms"] + processed_train_df["bathrooms"] + processed_train_df["accommodates"] + processed_train_df["beds"]
processed_train_df["col6"] = processed_train_df["bedrooms"] / processed_train_df.groupby(['host_id'])['bedrooms'].transform('std')
processed_train_df["col7"] = processed_train_df["bedrooms"] / processed_train_df.groupby(['host_id'])['bedrooms'].transform('mean')
processed_train_df["col8"] = processed_train_df["bedrooms"] / processed_train_df.groupby(['property_type'])['bedrooms'].transform('std')
processed_train_df["col9"] = processed_train_df["bedrooms"] / processed_train_df.groupby(['property_type'])['bedrooms'].transform('mean')
processed_train_df["col10"] = processed_train_df["bedrooms"] / processed_train_df.groupby(['smart_location'])['bedrooms'].transform('std')
processed_train_df["col11"] = processed_train_df["bedrooms"] / processed_train_df.groupby(['smart_location'])['bedrooms'].transform('mean')
processed_train_df["col12"] = processed_train_df["bedrooms"] / processed_train_df.groupby(['room_type'])['bedrooms'].transform('std')
processed_train_df["col13"] = processed_train_df["bedrooms"] / processed_train_df.groupby(['room_type'])['bedrooms'].transform('mean')
processed_train_df["col14"] = processed_train_df["bathrooms"] / processed_train_df.groupby(['host_id'])['bathrooms'].transform('std')
processed_train_df["col15"] = processed_train_df["bathrooms"] / processed_train_df.groupby(['host_id'])['bathrooms'].transform('mean')
processed_train_df["col16"] = processed_train_df["bathrooms"] / processed_train_df.groupby(['property_type'])['bathrooms'].transform('std')
processed_train_df["col17"] = processed_train_df["bathrooms"] / processed_train_df.groupby(['property_type'])['bathrooms'].transform('mean')
processed_train_df["col18"] = processed_train_df["bathrooms"] / processed_train_df.groupby(['smart_location'])['bathrooms'].transform('std')
processed_train_df["col19"] = processed_train_df["bathrooms"] / processed_train_df.groupby(['smart_location'])['bathrooms'].transform('mean')
processed_train_df["col20"] = processed_train_df["bathrooms"] / processed_train_df.groupby(['room_type'])['bathrooms'].transform('std')
processed_train_df["col21"] = processed_train_df["bathrooms"] / processed_train_df.groupby(['room_type'])['bathrooms'].transform('mean')
processed_train_df["col22"] = processed_train_df["accommodates"] / processed_train_df.groupby(['host_id'])['accommodates'].transform('std')
processed_train_df["col23"] = processed_train_df["accommodates"] / processed_train_df.groupby(['host_id'])['accommodates'].transform('mean')
processed_train_df["col24"] = processed_train_df["accommodates"] / processed_train_df.groupby(['property_type'])['accommodates'].transform('std')
processed_train_df["col25"] = processed_train_df["accommodates"] / processed_train_df.groupby(['property_type'])['accommodates'].transform('mean')
processed_train_df["col26"] = processed_train_df["accommodates"] / processed_train_df.groupby(['smart_location'])['accommodates'].transform('std')
processed_train_df["col27"] = processed_train_df["accommodates"] / processed_train_df.groupby(['smart_location'])['accommodates'].transform('mean')
processed_train_df["col28"] = processed_train_df["accommodates"] / processed_train_df.groupby(['room_type'])['accommodates'].transform('std')
processed_train_df["col29"] = processed_train_df["accommodates"] / processed_train_df.groupby(['room_type'])['accommodates'].transform('mean')
processed_test_df["col1"] = processed_test_df["bedrooms"] / processed_test_df["accommodates"]
processed_test_df["col2"] = processed_test_df["bathrooms"] / processed_test_df["accommodates"]
processed_test_df["col3"] = processed_test_df["beds"] / processed_test_df["bedrooms"]
processed_test_df["col4"] = processed_test_df["bedrooms"] + processed_test_df["bathrooms"]
processed_test_df["col5"] = processed_test_df["bedrooms"] + processed_test_df["bathrooms"] + processed_test_df["accommodates"] + processed_test_df["beds"]
processed_test_df["col6"] = processed_test_df["bedrooms"] / processed_test_df.groupby(['host_id'])['bedrooms'].transform('std')
processed_test_df["col7"] = processed_test_df["bedrooms"] / processed_test_df.groupby(['host_id'])['bedrooms'].transform('mean')
processed_test_df["col8"] = processed_test_df["bedrooms"] / processed_test_df.groupby(['property_type'])['bedrooms'].transform('std')
processed_test_df["col9"] = processed_test_df["bedrooms"] / processed_test_df.groupby(['property_type'])['bedrooms'].transform('mean')
processed_test_df["col10"] = processed_test_df["bedrooms"] / processed_test_df.groupby(['smart_location'])['bedrooms'].transform('std')
processed_test_df["col11"] = processed_test_df["bedrooms"] / processed_test_df.groupby(['smart_location'])['bedrooms'].transform('mean')
processed_test_df["col12"] = processed_test_df["bedrooms"] / processed_test_df.groupby(['room_type'])['bedrooms'].transform('std')
processed_test_df["col13"] = processed_test_df["bedrooms"] / processed_test_df.groupby(['room_type'])['bedrooms'].transform('mean')
processed_test_df["col14"] = processed_test_df["bathrooms"] / processed_test_df.groupby(['host_id'])['bathrooms'].transform('std')
processed_test_df["col15"] = processed_test_df["bathrooms"] / processed_test_df.groupby(['host_id'])['bathrooms'].transform('mean')
processed_test_df["col16"] = processed_test_df["bathrooms"] / processed_test_df.groupby(['property_type'])['bathrooms'].transform('std')
processed_test_df["col17"] = processed_test_df["bathrooms"] / processed_test_df.groupby(['property_type'])['bathrooms'].transform('mean')
processed_test_df["col18"] = processed_test_df["bathrooms"] / processed_test_df.groupby(['smart_location'])['bathrooms'].transform('std')
processed_test_df["col19"] = processed_test_df["bathrooms"] / processed_test_df.groupby(['smart_location'])['bathrooms'].transform('mean')
processed_test_df["col20"] = processed_test_df["bathrooms"] / processed_test_df.groupby(['room_type'])['bathrooms'].transform('std')
processed_test_df["col21"] = processed_test_df["bathrooms"] / processed_test_df.groupby(['room_type'])['bathrooms'].transform('mean')
processed_test_df["col22"] = processed_test_df["accommodates"] / processed_test_df.groupby(['host_id'])['accommodates'].transform('std')
processed_test_df["col23"] = processed_test_df["accommodates"] / processed_test_df.groupby(['host_id'])['accommodates'].transform('mean')
processed_test_df["col24"] = processed_test_df["accommodates"] / processed_test_df.groupby(['property_type'])['accommodates'].transform('std')
processed_test_df["col25"] = processed_test_df["accommodates"] / processed_test_df.groupby(['property_type'])['accommodates'].transform('mean')
processed_test_df["col26"] = processed_test_df["accommodates"] / processed_test_df.groupby(['smart_location'])['accommodates'].transform('std')
processed_test_df["col27"] = processed_test_df["accommodates"] / processed_test_df.groupby(['smart_location'])['accommodates'].transform('mean')
processed_test_df["col28"] = processed_test_df["accommodates"] / processed_test_df.groupby(['room_type'])['accommodates'].transform('std')
processed_test_df["col29"] = processed_test_df["accommodates"] / processed_test_df.groupby(['room_type'])['accommodates'].transform('mean')
return processed_train_df, processed_test_df
processed_train_df, processed_test_df = preprocess(train_df, test_df )<compute_test_metric> | import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns | Home Credit Default Risk |
1,414,913 | def rmsle(y_true, y_pred):
assert len(y_true)== len(y_pred)
return np.sqrt(np.mean(np.power(np.log1p(y_true + 1)- np.log1p(y_pred + 1), 2)))
def rmsle_lgb(preds, data):
y_true = np.array(data.get_label())
result = rmsle(preds, y_true)
return 'RMSLE', result, False<init_hyperparams> | data['DAYS_EMPLOYED'] | Home Credit Default Risk |
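A note on the `rmsle` helper above: `np.log1p(y + 1)` evaluates log(y + 2), while the conventional RMSLE uses log1p(y) = log(y + 1). The extra offset is applied to both arguments, so scores shift only slightly, but the textbook form for comparison is:

```python
import numpy as np

def rmsle_textbook(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Conventional RMSLE: sqrt(mean((log1p(y_true) - log1p(y_pred)) ** 2))."""
    return float(np.sqrt(np.mean((np.log1p(y_true) - np.log1p(y_pred)) ** 2)))

print(rmsle_textbook(np.array([100.0, 50.0]), np.array([110.0, 45.0])))
```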
1,414,913 | params = {
'boosting_type': 'gbdt',
'objective': 'regression',
# note: LightGBM has no built-in 'rmsle' metric; 'rmse' is set below and RMSLE is reported via the custom feval
'max_depth': 20,
'max_bin': 200,
'num_leaves': 97,
'min_data_in_leaf': 10,
'learning_rate': 0.0022,
'feature_fraction': 0.8,
'bagging_fraction': 0.8,
'bagging_freq': 10,
'min_sum_hessian_in_leaf': 10,
'lambda_l1': 0.01,
'lambda_l2': 0.01,
'verbose': 0,
'metric': 'rmse'
}<prepare_x_and_y> | data['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True ) | Home Credit Default Risk |
1,414,913 | y = processed_train_df["price"].values
X = processed_train_df.drop("price", axis=1 ).values
features = processed_train_df.drop("price", axis=1 ).columns
X_test = processed_test_df.values
cols = processed_train_df.drop("price", axis=1 ).columns.values
categorical_cols = cat_list[:]
feature_importance_df = pd.DataFrame()
N = 5
oof = np.zeros(len(X))
test_preds = np.zeros(len(test_df))
kf = KFold(n_splits=N, shuffle=True, random_state=1)
cv_score = []
for fold_,(train_idx, val_idx)in enumerate(kf.split(X), start=1):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
lgb_train = lgb.Dataset(X_train, y_train, feature_name=processed_train_df.drop("price", axis=1 ).columns.tolist() , categorical_feature=categorical_cols)
lgb_eval = lgb.Dataset(X_val, y_val, reference=lgb_train, feature_name=processed_train_df.drop("price", axis=1 ).columns.tolist() , categorical_feature=categorical_cols)
lgb_reg = lgb.train(params,
lgb_train,
num_boost_round=20000,
valid_sets=lgb_eval,
early_stopping_rounds=100,
verbose_eval=500,
feval = rmsle_lgb)
y_pred = lgb_reg.predict(X_val, num_iteration=lgb_reg.best_iteration)
oof[val_idx] = lgb_reg.predict(X_val, num_iteration=lgb_reg.best_iteration)
test_preds += lgb_reg.predict(X_test, num_iteration=lgb_reg.best_iteration)/ N
cv_score.append(rmsle(y_val, y_pred))
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = features
fold_importance_df["importance"] = lgb_reg.feature_importance(importance_type='gain')
fold_importance_df["fold"] = fold_ + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
print(f"fold{fold_}: {cv_score[-1]}
")
sns.residplot(y_pred, y_val, lowess=True, color='g')
plt.show()
print(f"CV RMSLE Score: {sum(cv_score)/len(cv_score)}" )<save_to_csv> | data['CODE_GENDER'].loc[data['CODE_GENDER']=='XNA'] | Home Credit Default Risk |
1,414,913 | sub_df["price"] = test_preds
sub_df.to_csv(f"submission{sum(cv_score)/len(cv_score)}.csv", index=False )<compute_test_metric> | data['CODE_GENDER'].replace({'XNA': 'F'}, inplace=True ) | Home Credit Default Risk |
1,414,913 | rmsle(y, oof )<prepare_x_and_y> | data['CODE_GENDER'].loc[data['CODE_GENDER']=='F'] | Home Credit Default Risk |
1,414,913 | l = []
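# Rank training rows by squared log error of the OOF predictions; the worst 5% are dropped as noisy labels before retraining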
for i, (true, pred) in enumerate(zip(y, oof)):
    l.append([np.power(np.log1p(true) - np.log1p(pred), 2), i])
l.sort(reverse=True)
l_idx = [x[1] for x in l[:len(l)//20]]
l_idx.sort()
outlier_rows = set(l_idx)
idx = [i for i in range(len(l)) if i not in outlier_rows]
y = processed_train_df["price"].values
X = processed_train_df.drop("price", axis=1 ).values
X = X[idx]
y = y[idx]<split> | data['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True ) | Home Credit Default Risk |
1,414,913 | N = 5
oof = np.zeros(len(X))
test_preds = np.zeros(len(test_df))
kf = KFold(n_splits=N, shuffle=True, random_state=1)
cv_score = []
for fold_,(train_idx, val_idx)in enumerate(kf.split(X), start=1):
X_train, X_val = X[train_idx], X[val_idx]
y_train, y_val = y[train_idx], y[val_idx]
lgb_train = lgb.Dataset(X_train, y_train, feature_name=features.tolist(), categorical_feature=categorical_cols)
lgb_eval = lgb.Dataset(X_val, y_val, reference=lgb_train, feature_name=features.tolist(), categorical_feature=categorical_cols)
lgb_reg = lgb.train(params,
lgb_train,
num_boost_round=20000,
valid_sets=lgb_eval,
early_stopping_rounds=100,
verbose_eval=500,
feval = rmsle_lgb)
y_pred = lgb_reg.predict(X_val, num_iteration=lgb_reg.best_iteration)
oof[val_idx] = y_pred
test_preds += lgb_reg.predict(X_test, num_iteration=lgb_reg.best_iteration)/ N
cv_score.append(rmsle(y_val, y_pred))
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = features
fold_importance_df["importance"] = lgb_reg.feature_importance(importance_type='gain')
fold_importance_df["fold"] = fold_ + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
print(f"fold{fold_}: {cv_score[-1]}
")
sns.residplot(y_pred, y_val, lowess=True, color='g')
plt.show()
print(f"CV RMSLE Score: {sum(cv_score)/len(cv_score)}" )<save_to_csv> | data['YEARS_BUILD_CREDIT'] = data['AMT_CREDIT']/data['YEARS_BUILD_AVG'] | Home Credit Default Risk |
1,414,913 | sub_df["price"] = test_preds
sub_df.to_csv(f"v2_submission{sum(cv_score)/len(cv_score)}.csv", index=False )<prepare_x_and_y> | data['Annuity_Income'] = data['AMT_ANNUITY']/data['AMT_INCOME_TOTAL'] | Home Credit Default Risk |
1,414,913 | ")
<load_from_csv> | data['Income_Cred'] = data['AMT_CREDIT']/data['AMT_INCOME_TOTAL'] | Home Credit Default Risk |
1,414,913 | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
<categorify> | data['EMP_AGE'] = data['DAYS_EMPLOYED']/data['DAYS_BIRTH'] | Home Credit Default Risk |
1,414,913 | class_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
train_text = train['comment_text']
test_text = test['comment_text']
all_text = pd.concat([train_text, test_text])
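# Fit the TF-IDF vectorizers on the combined text so train and test share one vocabulary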
word_vectorizer = TfidfVectorizer(
sublinear_tf=True,
strip_accents='unicode',
analyzer='word',
token_pattern=r'\w{1,}',
stop_words='english',
ngram_range=(1, 1),
max_features=10000)
word_vectorizer.fit(all_text)
train_word_features = word_vectorizer.transform(train_text)
test_word_features = word_vectorizer.transform(test_text)
char_vectorizer = TfidfVectorizer(
sublinear_tf=True,
strip_accents='unicode',
analyzer='char',
ngram_range=(2, 5),
max_features=50000)
char_vectorizer.fit(all_text)
train_char_features = char_vectorizer.transform(train_text)
test_char_features = char_vectorizer.transform(test_text)
train_features = hstack([train_char_features, train_word_features])
test_features = hstack([test_char_features, test_word_features])
<import_modules> | data['Income_PP'] = data['AMT_INCOME_TOTAL']/data['CNT_FAM_MEMBERS'] | Home Credit Default Risk |
1,414,913 | from sklearn.svm import LinearSVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.calibration import CalibratedClassifierCV
<create_dataframe> | data['CHILDREN_RATIO'] =(1 + data['CNT_CHILDREN'])/ data['CNT_FAM_MEMBERS'] | Home Credit Default Risk |
1,414,913 | scores = []
submission = pd.DataFrame.from_dict({'id': test['id']})
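# One independent binary logistic model per label (one-vs-rest over the six toxicity classes), scored by ROC AUC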
for class_name in class_names:
train_target = train[class_name]
classifier = LogisticRegression(solver='sag')
cv_score = np.mean(cross_val_score(classifier, train_features, train_target, cv=3, scoring='roc_auc'))
scores.append(cv_score)
print('CV score for class {} is {}'.format(class_name, cv_score))
classifier.fit(train_features, train_target)
submission[class_name] = classifier.predict_proba(test_features)[:, 1]
print('Total CV score is {}'.format(np.mean(scores)))
<feature_engineering> | data['PAYMENTS'] = data['AMT_ANNUITY']/ data['AMT_CREDIT'] | Home Credit Default Risk |
1,414,913 | ")
<define_variables> | data['NEW_CREDIT_TO_GOODS_RATIO'] = data['AMT_CREDIT'] / data['AMT_GOODS_PRICE']
data['GOODS_INCOME'] = data['AMT_GOODS_PRICE']/data['AMT_INCOME_TOTAL'] | Home Credit Default Risk |
1,414,913 | ship_dir = '../input/almaz-antey-hackathon-l1/'
train_image_dir = os.path.join(ship_dir, 'train/train')
test_image_dir = os.path.join(ship_dir, 'test/test' )<load_from_csv> | data['Ext_source_mult'] = data['EXT_SOURCE_1'] * data['EXT_SOURCE_2'] * data['EXT_SOURCE_3']
data['Ext_SOURCE_MEAN'] = data[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis = 1)
data['Ext_SOURCE_SD'] = data[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis = 1 ) | Home Credit Default Risk |
1,414,913 | train_df = pd.read_csv(os.path.join(ship_dir, 'train_segmentation.csv'))
sample_sub = pd.read_csv(os.path.join(ship_dir, 'sample_submission.csv'))<categorify> | columns = ['Annuity_Income', 'Income_Cred', 'EMP_AGE', 'Income_PP']
| Home Credit Default Risk |
1,414,913 | montage_rgb = lambda x: np.stack([montage(x[:, :, :, i])for i in range(x.shape[3])], -1)
def multi_rle_encode(img, **kwargs):
labels = label(img)
if img.ndim > 2:
return [rle_encode(np.sum(labels==k, axis=2), **kwargs)for k in np.unique(labels[labels>0])]
else:
return [rle_encode(labels==k, **kwargs)for k in np.unique(labels[labels>0])]
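# RLE format: space-separated (1-based start, run length) pairs over the column-major flattened mask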
def rle_encode(img, min_max_threshold=1e-4, max_mean_threshold=None):
if np.max(img)< min_max_threshold:
return ''
if max_mean_threshold and np.mean(img)> max_mean_threshold:
return ''
pixels = img.T.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x)for x in runs)
def rle_decode(mask_rle, shape=(768, 768)) :
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int)for x in(s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape(shape ).T
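# Hypothetical round-trip sanity check for a binary mask:
#   m = np.zeros((768, 768), dtype=np.uint8); m[10:20, 5] = 1
#   assert (rle_decode(rle_encode(m)) == m).all()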
def masks_as_image(in_mask_list, **kwargs):
all_masks = np.zeros(kwargs['shape'], dtype = np.uint8)
for mask in in_mask_list:
if isinstance(mask, str):
all_masks |= rle_decode(mask, **kwargs)
return all_masks
def masks_as_color(in_mask_list, **kwargs):
all_masks = np.zeros(kwargs['shape'], dtype=float)
scale = lambda x:(len(in_mask_list)+ x + 1)/(len(in_mask_list)* 2)
for i,mask in enumerate(in_mask_list):
if isinstance(mask, str):
all_masks[:,:] += scale(i)* rle_decode(mask, **kwargs)
return all_masks<drop_column> | test['CODE_GENDER'].replace({'XNA': 'F'}, inplace=True)
test['YEARS_BUILD_CREDIT'] = test['AMT_CREDIT']/test['YEARS_BUILD_AVG']
test['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)
test['Annuity_Income'] = test['AMT_ANNUITY']/test['AMT_INCOME_TOTAL']
test['Income_Cred'] = test['AMT_CREDIT']/test['AMT_INCOME_TOTAL']
test['EMP_AGE'] = test['DAYS_EMPLOYED']/test['DAYS_BIRTH']
test['Income_PP'] = test['AMT_INCOME_TOTAL']/test['CNT_FAM_MEMBERS']
test['CHILDREN_RATIO'] =(1 + test['CNT_CHILDREN'])/ test['CNT_FAM_MEMBERS']
test['PAYMENTS'] = test['AMT_ANNUITY']/ test['AMT_CREDIT']
test['NEW_CREDIT_TO_GOODS_RATIO'] = test['AMT_CREDIT'] / test['AMT_GOODS_PRICE']
test['GOODS_INCOME'] = test['AMT_GOODS_PRICE']/test['AMT_INCOME_TOTAL']
test['Ext_source_mult'] = test['EXT_SOURCE_1'] * test['EXT_SOURCE_2'] * test['EXT_SOURCE_3']
test['Ext_SOURCE_MEAN'] = test[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis = 1)
test['Ext_SOURCE_SD'] = test[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis = 1 ) | Home Credit Default Risk |
1,414,913 | generator = ship_generator(train_df, train_image_dir, batch_size=4 )<compute_test_metric> | bureau_new = bureau | Home Credit Default Risk |
1,414,913 | def dice_coef2(y_true, y_pred):
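# Dice = 2*|A∩B| / (|A|+|B|) on flattened masks; two empty masks count as a perfect match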
y_true_f = y_true.flatten()
y_pred_f = y_pred.flatten()
union = np.sum(y_true_f)+ np.sum(y_pred_f)
if union==0: return 1
intersection = np.sum(y_true_f * y_pred_f)
return 2.* intersection / union<categorify> | group = bureau_new[['SK_ID_CURR', 'DAYS_CREDIT']].groupby('SK_ID_CURR')['DAYS_CREDIT'].count().reset_index().rename(index=str, columns={'DAYS_CREDIT': 'BUREAU_LOAN_COUNT'} ) | Home Credit Default Risk |
1,414,913 | val_data = train_df.sample(200, replace=False ).reset_index()
val_data.EncodedPixels = val_data.EncodedPixels.map(lambda x: rle_decode(x))<categorify> | bureau_new = bureau_new.merge(group, how = 'left', on = 'SK_ID_CURR' ) | Home Credit Default Risk |
1,414,913 | pbar = tqdm(val_data.index)
predicts = []
for idx in pbar:
fpath = os.path.join(train_image_dir, val_data.iloc[idx].ImageId)
image, mask = find_mask(fpath)
predicts.append(mask )<compute_test_metric> | del group | Home Credit Default Risk |
1,414,913 | dice = np.mean([dice_coef2(y, predict)for y, predict in zip(val_data.EncodedPixels.to_numpy() , predicts)] )<train_model> | group = bureau_new[['SK_ID_CURR', 'CREDIT_TYPE']].groupby('SK_ID_CURR')['CREDIT_TYPE'].nunique().reset_index().rename(index=str, columns = {'CREDIT_TYPE': 'LOAN_TYPES_PER_CUST'} ) | Home Credit Default Risk |
1,414,913 | print(f'DICE = {dice}' )<categorify> | bureau_new = bureau_new.merge(group,on = ['SK_ID_CURR'], how = 'left')
del group | Home Credit Default Risk |
1,414,913 | pbar = tqdm(sample_sub.index[:])
for idx in pbar:
fpath = os.path.join(test_image_dir, sample_sub.iloc[idx].ImageId)
image, mask = find_mask(fpath)
encode_mask = rle_encode(mask)
sample_sub.at[idx, 'EncodedPixels'] = encode_mask
1,414,913 | sample_sub.to_csv('submission.csv', index=False)
sample_sub.head()<load_from_csv> | replace = {'Active': 1, 'Closed':0, 'Sold': 1, 'Bad debt': 1}
bureau_new['CREDIT_ACTIVE'] = bureau_new['CREDIT_ACTIVE'].replace(replace ) | Home Credit Default Risk |
1,414,913 | test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')<concatenate> | gp = bureau_new.groupby('SK_ID_CURR')['CREDIT_ACTIVE'].mean().reset_index().rename(index=str, columns={'CREDIT_ACTIVE': 'ACTIVE_LOANS_PERCENTAGE'} ) | Home Credit Default Risk |
1,414,913 | df = pd.concat([train, test] )<define_variables> | bureau_new = bureau_new.merge(gp, on = 'SK_ID_CURR', how = 'left' ) | Home Credit Default Risk |
1,414,913 | numero = [c for c in df.columns if c not in texto]
numero = [c for c in numero if c not in ['nota_mat', 'codigo_mun', 'Unnamed: 0']]<statistical_test> | del gp | Home Credit Default Risk |
1,414,913 | def outliers_iqr(ys):
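# Tukey's rule: values more than 1.5*IQR beyond the quartiles are flagged as outliers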
quartile_1, quartile_3 = np.percentile(ys, [25, 75])
iqr = quartile_3 - quartile_1
lower_bound = quartile_1 -(iqr * 1.5)
upper_bound = quartile_3 +(iqr * 1.5)
return np.where(( ys > upper_bound)|(ys < lower_bound))<define_variables> | def rep(x):
if x<0:
y=0
else:
y=1
return y | Home Credit Default Risk |
1,414,913 | for a in numero:
quartile_1, quartile_3 = np.percentile(df[a], [25, 75])
iqr = quartile_3 - quartile_1
lower_bound = quartile_1 -(iqr * 1.5)
upper_bound = quartile_3 +(iqr * 1.5)
df.loc[df[a] < lower_bound, a] = np.nan
df.loc[df[a] > upper_bound, a] = np.nan
1,414,913 | texto = [c for c in texto if c not in ['codigo_mun']]<data_type_conversions> | grp = bureau_new.groupby('SK_ID_CURR')['CREDIT_ENDDATE_BINARY'].mean().reset_index().rename(index=str, columns={'CREDIT_ENDDATE_BINARY': 'CREDIT_ENDDATE_PERCENTAGE'} ) | Home Credit Default Risk |
1,414,913 | for c in texto:
df[c]=df[c].astype('category' ).cat.codes<define_variables> | bureau_new = bureau_new.merge(grp, on = 'SK_ID_CURR', how = 'left')
del grp | Home Credit Default Risk |
1,414,913 | feats = [c for c in df.columns if c not in ['nota_mat','codigo_mun', 'Unnamed: 0']]<groupby> | num_aggregations = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
'DAYS_CREDIT_UPDATE': ['mean'],
'CREDIT_DAY_OVERDUE': ['max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'AMT_ANNUITY': ['max', 'mean'],
'CNT_CREDIT_PROLONG': ['sum'],
} | Home Credit Default Risk |
1,414,913 | nme = df.groupby(['estado'], as_index=False ).mean()
for c in nme.columns:
nme[c].fillna(nme[c].mean() , inplace=True )<merge> | bureau_agg = bureau_new.groupby('SK_ID_CURR' ).agg({**num_aggregations} ) | Home Credit Default Risk |
1,414,913 | df2 = pd.merge(df, nme, left_on='estado', right_on='estado', how='left', suffixes=('', '_mean'))
df2['estado_mean']=df2['estado']<define_variables> | bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist() ])
bureau_agg.reset_index(inplace=True ) | Home Credit Default Risk |
1,414,913 | feats2 = [c for c in feats if c not in ['codigo_mun']]<data_type_conversions> | bureau_merge = bureau_new.merge(bureau_agg, on = 'SK_ID_CURR', how = 'left')
del bureau_agg | Home Credit Default Risk |
1,414,913 | for c in feats2:
df[c].fillna(df2[c+'_mean'], inplace=True )<feature_engineering> | buro_cat_features = [bcol for bcol in bureau_merge.columns if bureau_merge[bcol].dtype == 'object'] | Home Credit Default Risk |
1,414,913 | df['nota_mat'] = np.log(df['nota_mat'] )<import_modules> | buro = pd.get_dummies(bureau_merge, columns=buro_cat_features ) | Home Credit Default Risk |
1,414,913 | import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
<set_options> | cat_columns = [col for col in bureau_balance.columns if bureau_balance[col].dtype == 'object'] | Home Credit Default Risk |
1,414,913 | plt.rcParams['figure.figsize'] =(20,30 )<prepare_x_and_y> | bureau_balance = pd.get_dummies(bureau_balance,cat_columns, dummy_na = True ) | Home Credit Default Risk |
1,414,913 | test = df[df['nota_mat'].isnull() ]
train = df[df['nota_mat'].notnull() ]<import_modules> | bb_group = bureau_balance.groupby('SK_ID_BUREAU' ).agg(['min', 'max', 'mean'] ) | Home Credit Default Risk |
1,414,913 | from sklearn.model_selection import train_test_split<import_modules> | bb_group.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_group.columns.tolist() ])
bb_group.reset_index(inplace=True)
| Home Credit Default Risk |
1,414,913 | from sklearn.model_selection import train_test_split<split> | buro = buro.merge(bb_group, on = 'SK_ID_BUREAU', how = 'left' ) | Home Credit Default Risk |
1,414,913 | train, valid = train_test_split(train, random_state = 42 )<find_best_params> | avg_buro = buro.groupby('SK_ID_CURR' ).mean() | Home Credit Default Risk |
1,414,913 | rf = RandomForestRegressor(random_state = 42)
print('Parameters currently in use:\n')
pprint(rf.get_params() )<find_best_params> | avg_buro['buro_count'] = buro[['SK_ID_BUREAU', 'SK_ID_CURR']].groupby('SK_ID_CURR' ).count() ['SK_ID_BUREAU']
del avg_buro['SK_ID_BUREAU'], bb_group | Home Credit Default Risk |
1,414,913 | best = rf_random.best_params_
best2 = {'n_estimators': 800,
'min_samples_split': 2,
'min_samples_leaf': 2,
'max_features': 'sqrt',
'max_depth': 50,
'bootstrap': False}
best<choose_model_class> | cat_columns = [col for col in installments_payments.columns if installments_payments[col].dtype == 'object'] | Home Credit Default Risk |
1,414,913 | best_random = RandomForestRegressor(n_estimators= 800, min_samples_split = 2, min_samples_leaf= 2, max_features = 'sqrt', max_depth =50, bootstrap= False )<compute_test_metric> | installments_payments = pd.get_dummies(installments_payments,cat_columns, dummy_na = True ) | Home Credit Default Risk |
1,414,913 | def evaluate(model, test_features, test_labels):
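# Reports mean absolute error and 100 - MAPE as a rough accuracy percentage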
predictions = model.predict(test_features)
errors = abs(predictions - test_labels)
mape = 100 * np.mean(errors / test_labels)
accuracy = 100 - mape
print('Model Performance')
print('Average Error: {:0.4f} degrees.'.format(np.mean(errors)))
print('Accuracy = {:0.2f}%.'.format(accuracy))
return accuracy<train_model> | installments_payments['AMOUNT_DIFF'] = installments_payments['AMT_INSTALMENT'] - installments_payments['AMT_PAYMENT']
| Home Credit Default Risk |
1,414,913 | base_model = RandomForestRegressor(n_estimators = 10, random_state = 42)
base_model.fit(train[feats], train['nota_mat'])
base_accuracy = evaluate(base_model, valid[feats], valid['nota_mat'] )<train_model> | installments_payments['AMOUNT_PERC'] = installments_payments['AMT_PAYMENT']/installments_payments['AMT_INSTALMENT'] | Home Credit Default Risk |
1,414,913 | best_random.fit(train[feats], train['nota_mat'])
random_accuracy = evaluate(best_random, valid[feats], valid['nota_mat'] )<compute_test_metric> | installments_payments['DAYS_P'] = installments_payments['DAYS_ENTRY_PAYMENT']-installments_payments['DAYS_INSTALMENT']
installments_payments['DAYS_I'] = installments_payments['DAYS_INSTALMENT']-installments_payments['DAYS_ENTRY_PAYMENT'] | Home Credit Default Risk |
1,414,913 | print('Improvement of {:0.2f}%.'.format(100 *(random_accuracy - base_accuracy)/ base_accuracy))<predict_on_test> | aggregations = {
'NUM_INSTALMENT_VERSION': ['nunique'],
'DAYS_P': ['max', 'mean', 'sum'],
'DAYS_I': ['max', 'mean', 'sum'],
'AMOUNT_DIFF': ['max', 'mean', 'sum', 'var'],
'AMOUNT_PERC': ['max', 'mean', 'sum', 'var'],
'AMT_INSTALMENT': ['max', 'mean', 'sum'],
'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']
} | Home Credit Default Risk |
1,414,913 | test['nota_mat'] = best_random.predict(test[feats] )<data_type_conversions> | installments_payments_agg = installments_payments.groupby('SK_ID_CURR' ).agg(aggregations ) | Home Credit Default Risk |
1,414,913 | test['nota_mat'] = np.exp(test['nota_mat'])
test['codigo_mun'] = test['codigo_mun'].apply(lambda x: x.replace('ID_ID_', ''))
<save_to_csv> | installments_payments_agg.columns = pd.Index(['INSTALL_' + e[0] + "_" + e[1].upper() for e in installments_payments_agg.columns.tolist() ])
installments_payments_agg.reset_index(inplace=True)
| Home Credit Default Risk |
1,414,913 | test[['codigo_mun','nota_mat']].to_csv('rf.csv', index=False )<load_from_csv> | installments_payments = installments_payments.merge(installments_payments_agg, on = 'SK_ID_CURR',how = 'left' ) | Home Credit Default Risk |
1,414,913 | df = pd.read_csv(".. /input/train.csv")
dfTeste = pd.read_csv(".. /input/test.csv" )<prepare_x_and_y> | previous_application['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
previous_application['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
previous_application['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
previous_application['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
previous_application['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
previous_application['INTEREST_PERC'] =(previous_application['RATE_INTEREST_PRIMARY']/100)*previous_application['AMT_DOWN_PAYMENT']
previous_application['INTEREST_ANN_PERC'] =(previous_application['RATE_INTEREST_PRIMARY']/100)*previous_application['AMT_ANNUITY']
previous_application['INTEREST_CREDIT_PERC'] =(previous_application['RATE_INTEREST_PRIMARY']/100)*previous_application['AMT_CREDIT']
previous_application['FIRST_LAST'] = previous_application['DAYS_FIRST_DUE'] - previous_application['DAYS_LAST_DUE']
| Home Credit Default Risk |
1,414,913 | y = 'nota_mat'<split> | previous_application['APPLICATION_ACTUAL_CREDIT'] = previous_application['AMT_APPLICATION']/previous_application['AMT_CREDIT'] | Home Credit Default Risk |
1,414,913 | train, test = train_test_split(df, random_state=42)
train.shape, test.shape<feature_engineering> | num_aggregations = {
'AMT_ANNUITY': ['min', 'max', 'mean'],
'AMT_APPLICATION': ['min', 'max', 'mean'],
'AMT_CREDIT': ['min', 'max', 'mean'],
'INTEREST_CREDIT_PERC': ['min', 'max', 'mean', 'var'],
'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],
'AMT_GOODS_PRICE': ['min', 'max', 'mean'],
'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
'FIRST_LAST': ['mean', 'max', 'min']
}
prev_agg = previous_application.groupby('SK_ID_CURR' ).agg(num_aggregations)
prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist() ])
prev_agg.reset_index(inplace=True)
previous_application = previous_application.merge(prev_agg, on = 'SK_ID_CURR', how = 'left')
del prev_agg | Home Credit Default Risk |
1,414,913 | df['codigo_mun'] = df['codigo_mun'].str.replace('ID_ID_','')
df['comissionados_por_servidor'] = df['comissionados_por_servidor'].str.replace('%','')
df['area']=df['area'].str.replace(',','')
df['ranking_igm']=df['ranking_igm'].str.replace('º','')
df['densidade_dem']=df['densidade_dem'].str.replace(',','')
dfTeste['codigo_mun'] = dfTeste['codigo_mun'].str.replace('ID_ID_','')
dfTeste['comissionados_por_servidor'] = dfTeste['comissionados_por_servidor'].str.replace('%','')
dfTeste['area']=dfTeste['area'].str.replace(',','')
dfTeste['ranking_igm']=dfTeste['ranking_igm'].str.replace('º','')
dfTeste['densidade_dem']=dfTeste['densidade_dem'].str.replace(',','')
<data_type_conversions> | approved = previous_application[previous_application['NAME_CONTRACT_STATUS'] == 'Approved'] | Home Credit Default Risk |
1,414,913 | df['codigo_mun']=df['codigo_mun'].values.astype('int64')
df['area']=df['area'].values.astype('float64')
df['densidade_dem']=df['densidade_dem'].values.astype('float64')
dfTeste['codigo_mun']=dfTeste['codigo_mun'].values.astype('int64')
dfTeste['area']=dfTeste['area'].values.astype('float64')
dfTeste['densidade_dem']=dfTeste['densidade_dem'].values.astype('float64')
df.head()
<data_type_conversions> | approved_agg = approved.groupby('SK_ID_CURR' ).agg(num_aggregations ) | Home Credit Default Risk |
1,414,913 | for c in['regiao', 'estado', 'porte']:
df[c] = df[c].astype('category' ).cat.codes
for c in['regiao', 'estado', 'porte']:
dfTeste[c] = dfTeste[c].astype('category' ).cat.codes<data_type_conversions> | approved_agg.columns = pd.Index(['APPROVED_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist() ] ) | Home Credit Default Risk |
1,414,913 | for c in['densidade_dem', 'perc_pop_econ_ativa', 'exp_vida','exp_anos_estudo','gasto_pc_saude','hab_p_medico','gasto_pc_educacao','exp_anos_estudo','idhm']:
df[c] = df[c].fillna(( df[c].mean()))
for c in['densidade_dem', 'perc_pop_econ_ativa', 'exp_vida','exp_anos_estudo','gasto_pc_saude','hab_p_medico','gasto_pc_educacao','exp_anos_estudo','idhm']:
dfTeste[c] = dfTeste[c].fillna(( dfTeste[c].mean()))<define_variables> | previous_application = previous_application.merge(approved_agg, how='left', on='SK_ID_CURR' ) | Home Credit Default Risk |
1,414,913 | feats = ['exp_anos_estudo']<split> | refused = previous_application[previous_application['NAME_CONTRACT_STATUS'] == 'Refused']
refused_agg = refused.groupby('SK_ID_CURR' ).agg(num_aggregations)
refused_agg.columns = pd.Index(['REFUSED_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist() ])
refused_agg.reset_index(inplace=True)
previous_application = previous_application.merge(refused_agg, how='left', on='SK_ID_CURR')
| Home Credit Default Risk |
1,414,913 | train, test = train_test_split(df, random_state=42)
train.shape, test.shape<choose_model_class> | aggregations = {
'MONTHS_BALANCE': ['max', 'mean', 'size'],
'SK_DPD': ['max', 'mean'],
'SK_DPD_DEF': ['max', 'mean']
}
| Home Credit Default Risk |
1,414,913 | rf = RandomForestRegressor(random_state=42, n_jobs=-1,n_estimators=30000,min_samples_leaf=650)
y='nota_mat'
<train_model> | POS_CASH_AGG = POS_CASH_balance.groupby('SK_ID_CURR' ).agg(aggregations ) | Home Credit Default Risk |
1,414,913 | rf.fit(df[feats], df[y] )<import_modules> | POS_CASH_AGG.columns = pd.Index(['POS_CASH_' + e[0] + "_" + e[1].upper() for e in POS_CASH_AGG.columns.tolist() ] ) | Home Credit Default Risk |
1,414,913 | from sklearn.metrics import mean_squared_error
<predict_on_test> | POS_CASH_AGG['COUNT'] = POS_CASH_AGG.groupby('SK_ID_CURR' ).size() | Home Credit Default Risk |
1,414,913 | valid_preds = rf.predict(test[feats])
mean_squared_error(test[y], valid_preds)**(1/2)
<predict_on_test> | cat_columns = [col for col in POS_CASH_balance.columns if POS_CASH_balance[col].dtype == 'object']
POS_CASH_balance = pd.get_dummies(POS_CASH_balance,cat_columns, dummy_na = True)
POS_CASH_balance = POS_CASH_balance.merge(POS_CASH_AGG, how = 'left', on = 'SK_ID_CURR')
POS_CASH_balance.head() | Home Credit Default Risk |
1,414,913 | dfTeste[y]=rf.predict(dfTeste[feats])
<save_to_csv> | POS_CASH_balance = POS_CASH_balance.groupby('SK_ID_CURR' ).mean().reset_index() | Home Credit Default Risk |
1,414,913 | dfTeste[['codigo_mun', y]].to_csv('rf3.csv', index=False )<load_from_csv> | del POS_CASH_AGG, POS_CASH_balance['SK_ID_PREV'] | Home Credit Default Risk |
1,414,913 | df = pd.read_csv('../input/train.csv')<load_from_csv> | y = data['TARGET']
del data['TARGET']
categorical_features = [col for col in data.columns if data[col].dtype == 'object']
one_hot_df = pd.concat([data,test])
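# Concatenate before get_dummies so train and test end up with identical one-hot columns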
one_hot_df = pd.get_dummies(one_hot_df, columns=categorical_features)
data = one_hot_df.iloc[:data.shape[0],:]
test = one_hot_df.iloc[data.shape[0]:,]
print(data.shape, test.shape ) | Home Credit Default Risk |
1,414,913 | test = pd.read_csv('../input/test.csv')<feature_engineering> | print('Removing features with more than 80% missing...')
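# Keep only columns whose missing rate in the training frame is below 80%; apply the same column set to test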
test = test[test.columns[data.isnull().mean() < 0.80]]
data = data[data.columns[data.isnull().mean() < 0.80]]
print(data.shape, test.shape ) | Home Credit Default Risk |