kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completetion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
1,189,554 | dtc = DecisionTreeClassifier(random_state=0 ).fit(x_Train, y_Train[:,1])
score(dtc, x_Test, y_Test[:,1] )<train_model> | print("Start Bureau................ " ) | Home Credit Default Risk |
1,189,554 | etc = ExtraTreesClassifier(n_estimators=10, max_depth=None,
min_samples_split=2, random_state=0 ).fit(x_Train, y_Train[:,1])
score(etc, x_Test, y_Test[:,1] )<train_model> | bureau = pd.read_csv('.. /input/bureau.csv', nrows = num_rows ) | Home Credit Default Risk |
1,189,554 | sgd = SGDClassifier(loss="log", penalty="elasticnet", max_iter=5 ).fit(x_Train, y_Train[:,1])
score(sgd, x_Test, y_Test[:,1] )<prepare_output> | bb = pd.read_csv('.. /input/bureau_balance.csv', nrows = num_rows)
bb, bb_cat = one_hot_encoder(bb, nan_as_category)
bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category ) | Home Credit Default Risk |
1,189,554 | submission = pd.DataFrame({
"Id": ids.Id,
"Expected": probs[:,1]
})
submission.head()<save_to_csv> | bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
for col in bb_cat:
bb_aggregations[col] = ['mean']
bb_agg = bb.groupby('SK_ID_BUREAU' ).agg(bb_aggregations)
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist() ])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(columns= 'SK_ID_BUREAU', inplace= True)
del bb, bb_agg
gc.collect() | Home Credit Default Risk |
1,189,554 | submission.to_csv('sampleSubmission.csv', index=False )<set_options> | num_aggregations = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'CREDIT_DAY_OVERDUE': ['max', 'mean'],
'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'DAYS_CREDIT_UPDATE': ['min', 'max', 'mean'],
'AMT_ANNUITY': ['max', 'mean'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum']
} | Home Credit Default Risk |
1,189,554 | plt.style.use('ggplot')
%matplotlib inline
<choose_model_class> | cat_aggregations = {}
for cat in bureau_cat: cat_aggregations[cat] = ['mean']
for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist() ] ) | Home Credit Default Risk |
1,189,554 | xgb.XGBClassifier()<load_from_csv> | active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR' ).agg(num_aggregations)
active_agg.columns = pd.Index(['ACT_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(active_agg, how='left')
del active, active_agg
gc.collect()
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR' ).agg(num_aggregations)
closed_agg.columns = pd.Index(['CLS_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(closed_agg, how='left')
del closed, closed_agg, bureau
gc.collect() | Home Credit Default Risk |
1,189,554 | df_train = pd.read_csv('.. /input/homework-for-students3/train.csv', index_col=0)
df_test = pd.read_csv('.. /input/homework-for-students3/test.csv', index_col=0)
print(len(df_test))
print(len(df_train))<load_from_csv> | print("End Bureau................ " ) | Home Credit Default Risk |
1,189,554 | gdp=pd.read_csv('.. /input/homework-for-students3/US_GDP_by_State.csv')
zipdata=pd.read_csv('.. /input/homework-for-students3/free-zipcode-database.csv')
drop_col = ['WorldRegion',
'Country', 'LocationText', 'Location', 'Decommisioned',
'TaxReturnsFiled', 'EstimatedPopulation', 'TotalWages', 'Notes']
zipdata=zipdata.drop(drop_col,axis=1)
state=pd.read_csv('.. /input/homework-for-students3/statelatlong.csv')
spi=pd.read_csv('.. /input/homework-for-students3/spi.csv')
spi['date']=pd.to_datetime(spi['date'])
spi= spi.set_index("date")
spi=spi.asfreq('d', method='ffill')
spi = spi.reset_index()<data_type_conversions> | print("Start previous_application................ " ) | Home Credit Default Risk |
1,189,554 | df_train["issue_d"]=pd.to_datetime(df_train["issue_d"])
df_test["issue_d"]=pd.to_datetime(df_test["issue_d"])
df_train = df_train[df_train.issue_d.dt.year >= 2015]
df_train = df_train[df_train['annual_inc'] < df_train['annual_inc'].quantile(0.999)]
df_train['IDdami']=df_train.index
df_test['IDdami']=df_test.index<data_type_conversions> | prev = pd.read_csv('.. /input/previous_application.csv', nrows = num_rows ) | Home Credit Default Risk |
1,189,554 | df_train["earliest_cr_line"]=pd.to_datetime(df_train["earliest_cr_line"])
df_test["earliest_cr_line"]=pd.to_datetime(df_test["earliest_cr_line"])
df_train["issue_d_unix"] = df_train["issue_d"].view('int64')// 10**9
df_test["issue_d_unix"] = df_test["issue_d"].view('int64')// 10**9
df_train["earliest_cr_line_unix"] = df_train["earliest_cr_line"].view('int64')// 10**9
df_test["earliest_cr_line_unix"] = df_test["earliest_cr_line"].view('int64')// 10**9
df_train["period"]=df_train["issue_d_unix"]-df_train["earliest_cr_line_unix"]
df_test["period"]=df_test["issue_d_unix"]-df_test["earliest_cr_line_unix"]
df_train["period"]=df_train["period"].fillna(0)
df_test["period"]=df_test["period"].fillna(0)
df_train['aaa']=round(df_train['loan_amnt']/df_train['installment'],5)
df_test['aaa']=round(df_test['loan_amnt']/df_test['installment'],5)
df_train['bbb']=round(df_train['loan_amnt']/df_train['annual_inc'],5)
df_test['bbb']=round(df_test['loan_amnt']/df_test['annual_inc'],5)
df_train['ddd']=round(df_train['revol_bal']/df_train['revol_util'],5)
df_test['ddd']=round(df_test['revol_bal']/df_test['revol_util'],5)
df_train['eee']=round(df_train['revol_bal']/df_train['total_acc'],5)
df_test['eee']=round(df_test['revol_bal']/df_test['total_acc'],5)
df_train['fff']=round(df_train['revol_util']/df_train['total_acc'],5)
df_test['fff']=round(df_test['revol_util']/df_test['total_acc'],5)
df_train['aaa_open_acc']=round(df_train['loan_amnt']/df_train['open_acc'],5)
df_test['aaa_open_acc']=round(df_test['loan_amnt']/df_test['open_acc'],5)
<merge> | prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True ) | Home Credit Default Risk |
1,189,554 | df_train = df_train.reset_index()
df_test = df_test.reset_index()
kari_df_train=pd.merge(df_train, state, how='left',left_on='addr_state',right_on='State')
kari_df_test=pd.merge(df_test, state, how='left',left_on='addr_state',right_on='State')
df_train = kari_df_train.set_index("ID")
df_test =kari_df_test.set_index("ID")
df_train=df_train.drop('State',axis=1)
df_test=df_test.drop('State',axis=1 )<merge> | prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT'] | Home Credit Default Risk |
1,189,554 | df_train['dami_year']=df_train.issue_d.dt.year
df_test['dami_year']=int(2015)
df_train = df_train.reset_index()
df_test = df_test.reset_index()
kari_df_train=pd.merge(df_train, gdp, how='left',left_on=['City','dami_year'],right_on=['State','year'])
kari_df_test=pd.merge(df_test, gdp, how='left',left_on=['City','dami_year'],right_on=['State','year'])
df_train = kari_df_train.set_index("ID")
df_test =kari_df_test.set_index("ID")
df_train=df_train.drop(['State','dami_year','year'],axis=1)
df_test=df_test.drop(['State','dami_year','year'],axis=1 )<merge> | num_aggregations = {
'AMT_ANNUITY': ['min', 'max', 'mean'],
'AMT_APPLICATION': ['min', 'max', 'mean'],
'AMT_CREDIT': ['min', 'max', 'mean'],
'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],
'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],
'AMT_GOODS_PRICE': ['min', 'max', 'mean'],
'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
} | Home Credit Default Risk |
1,189,554 | df_train = df_train.reset_index()
df_test = df_test.reset_index()
kari_df_train=pd.merge(df_train, spi, how='left',left_on=['issue_d'],right_on=['date'])
kari_df_test=pd.merge(df_test, spi, how='left',left_on=['issue_d'],right_on=['date'])
df_train = kari_df_train.set_index("ID")
df_test =kari_df_test.set_index("ID")
df_train=df_train.drop(['date'],axis=1)
df_test=df_test.drop(['date'],axis=1 )<data_type_conversions> | cat_aggregations = {}
for cat in cat_cols:
cat_aggregations[cat] = ['mean']
prev_agg = prev.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist() ] ) | Home Credit Default Risk |
1,189,554 | zipdata["Zipcode"]=zipdata["Zipcode"].astype(str)
zipdata["Zipcode"]=zipdata["Zipcode"].str[:3]
zipdata=zipdata[['Zipcode','State','Xaxis', 'Yaxis', 'Zaxis']]
zipdata=zipdata.groupby(['Zipcode','State'],as_index=False ).mean()
<data_type_conversions> | approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
approved_agg = approved.groupby('SK_ID_CURR' ).agg(num_aggregations)
approved_agg.columns = pd.Index(['APR_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist() ])
prev_agg = prev_agg.join(approved_agg, how='left')
refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
refused_agg = refused.groupby('SK_ID_CURR' ).agg(num_aggregations)
refused_agg.columns = pd.Index(['REF_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist() ])
prev_agg = prev_agg.join(refused_agg, how='left')
del refused, refused_agg, approved, approved_agg, prev
gc.collect()
| Home Credit Default Risk |
1,189,554 | df_train['zip_code']=df_train['zip_code'].str[:3]
df_test['zip_code']=df_test['zip_code'].str[:3]
df_train["zip_code"]=df_train["zip_code"].astype(str)
df_test["zip_code"]=df_test["zip_code"].astype(str)
<count_duplicates> | print("End previous_application................ " ) | Home Credit Default Risk |
1,189,554 | zipdata[zipdata.duplicated() ]<merge> | print("Start POS_CASH_balance................ " ) | Home Credit Default Risk |
1,189,554 | df_train = df_train.reset_index()
df_test = df_test.reset_index()
kari_df_train=pd.merge(df_train, zipdata, how='left',left_on=['zip_code','addr_state'],right_on=['Zipcode','State'])
kari_df_test=pd.merge(df_test, zipdata, how='left',left_on=['zip_code','addr_state'],right_on=['Zipcode','State'])
df_train = kari_df_train.set_index("ID")
df_test =kari_df_test.set_index("ID")
df_train=df_train.drop(['Zipcode','State'],axis=1)
df_test=df_test.drop(['Zipcode','State'],axis=1 )<categorify> | pos = pd.read_csv('.. /input/POS_CASH_balance.csv', nrows = num_rows ) | Home Credit Default Risk |
1,189,554 | encoder = OrdinalEncoder()
enc_train = encoder.fit_transform(df_train['zip_code'].values)
enc_test = encoder.transform(df_test['zip_code'].values)
df_train = df_train.reset_index()
df_test = df_test.reset_index()
df_train['zip_code_la']=enc_train.iloc[:,0]
df_test['zip_code_la']=enc_test.iloc[:,0]
df_train = df_train.set_index("ID")
df_test =df_test.set_index("ID" )<categorify> | pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)
aggregations = {
'MONTHS_BALANCE': ['max', 'mean', 'size'],
'SK_DPD': ['max', 'mean'],
'SK_DPD_DEF': ['max', 'mean']
} | Home Credit Default Risk |
1,189,554 | zi_cal1='zip_code'
zi_summary1 = df_train[zi_cal1].value_counts()
df_train['zip_code_co'] = df_train[zi_cal1].map(zi_summary1)
df_test['zip_code_co'] = df_test[zi_cal1].map(zi_summary1 )<categorify> | for cat in cat_cols:
aggregations[cat] = ['mean']
pos_agg = pos.groupby('SK_ID_CURR' ).agg(aggregations)
pos_agg.columns = pd.Index(['POS_' + e[0] + "_" + e[1].upper() for e in pos_agg.columns.tolist() ])
pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR' ).size()
del pos
gc.collect() | Home Credit Default Risk |
1,189,554 | encoder = OrdinalEncoder()
enc_train = encoder.fit_transform(df_train['addr_state'].values)
enc_test = encoder.transform(df_test['addr_state'].values)
df_train = df_train.reset_index()
df_test = df_test.reset_index()
df_train['addr_state_la']=enc_train.iloc[:,0]
df_test['addr_state_la']=enc_test.iloc[:,0]
df_train = df_train.set_index("ID")
df_test =df_test.set_index("ID" )<categorify> | print("Start POS_CASH_balance................ " ) | Home Credit Default Risk |
1,189,554 | zi_cal2='addr_state'
zi_summary2 = df_train[zi_cal2].value_counts()
df_train['addr_state_co'] = df_train[zi_cal2].map(zi_summary2)
df_test['addr_state_co'] = df_test[zi_cal2].map(zi_summary2 )<data_type_conversions> | ins = pd.read_csv('.. /input/installments_payments.csv', nrows = num_rows)
ins, cat_cols = one_hot_encoder(ins, nan_as_category= True ) | Home Credit Default Risk |
1,189,554 |
<data_type_conversions> | ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)
ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0 ) | Home Credit Default Risk |
1,189,554 |
<data_type_conversions> | aggregations = {
'NUM_INSTALMENT_VERSION': ['nunique'],
'DPD': ['max', 'mean', 'sum'],
'DBD': ['max', 'mean', 'sum'],
'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],
'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],
'AMT_INSTALMENT': ['max', 'mean', 'sum'],
'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']
}
for cat in cat_cols:
aggregations[cat] = ['mean']
ins_agg = ins.groupby('SK_ID_CURR' ).agg(aggregations)
ins_agg.columns = pd.Index(['INS_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist() ])
ins_agg['INS_COUNT'] = ins.groupby('SK_ID_CURR' ).size()
del ins
gc.collect() | Home Credit Default Risk |
1,189,554 |
<drop_column> | print("End POS_CASH_balance................ " ) | Home Credit Default Risk |
1,189,554 | df_train=df_train.drop(['issue_d','earliest_cr_line'],axis=1)
df_test=df_test.drop(['issue_d','earliest_cr_line'],axis=1)
drop_col=['City','acc_now_delinq']
df_train=df_train.drop(drop_col,axis=1)
df_test=df_test.drop(drop_col,axis=1 )<data_type_conversions> | print("Start credit_card_balance................ " ) | Home Credit Default Risk |
1,189,554 |
<data_type_conversions> | cc = pd.read_csv('.. /input/credit_card_balance.csv', nrows = num_rows ) | Home Credit Default Risk |
1,189,554 |
<categorify> | cc, cat_cols = one_hot_encoder(cc, nan_as_category= True)
cc.drop(columns = ['SK_ID_PREV'], inplace = True)
cc_agg = cc.groupby('SK_ID_CURR' ).agg(['min', 'max', 'mean', 'sum', 'var'])
cc_agg.columns = pd.Index(['CC_' + e[0] + "_" + e[1].upper() for e in cc_agg.columns.tolist() ])
cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR' ).size()
del cc
gc.collect() | Home Credit Default Risk |
1,189,554 | ce_cal2='initial_list_status'
ce_summary2 = df_train[ce_cal2].value_counts()
df_train['initial_list_status'] = df_train[ce_cal2].map(ce_summary2)
df_test['initial_list_status'] = df_test[ce_cal2].map(ce_summary2 )<categorify> | print("End credit_card_balance................ " ) | Home Credit Default Risk |
1,189,554 | ce_cal2='application_type'
ce_summary2 = df_train[ce_cal2].value_counts()
df_train['application_type'] = df_train[ce_cal2].map(ce_summary2)
df_test['application_type'] = df_test[ce_cal2].map(ce_summary2 )<categorify> | with timer("Process bureau and bureau_balance"):
print("Bureau df shape:", bureau_agg.shape)
df = df.join(bureau_agg, how='left',on='SK_ID_CURR')
gc.collect()
with timer("Process previous_applications"):
print("Previous applications df shape:", prev_agg.shape)
df = df.join(prev_agg, how='left', on='SK_ID_CURR')
gc.collect()
with timer("Process POS-CASH balance"):
print("Pos-cash balance df shape:", pos_agg.shape)
df = df.join(pos_agg, how='left', on='SK_ID_CURR')
gc.collect()
with timer("Process installments payments"):
print("Installments payments df shape:", ins_agg.shape)
df = df.join(ins_agg, how='left', on='SK_ID_CURR')
gc.collect()
with timer("Process credit card balance"):
print("Credit card balance df shape:", cc_agg.shape)
df = df.join(cc_agg, how='left', on='SK_ID_CURR')
gc.collect()
del bureau_agg,prev_agg,pos_agg,ins_agg,cc_agg
gc.collect() | Home Credit Default Risk |
1,189,554 | df_train['grade'].unique()
df_train=df_train.replace({'grade':{'A':1,'B':2,'C':3,'D':4,'E':5,'F':6,'G':7}})
df_test=df_test.replace({'grade':{'A':1,'B':2,'C':3,'D':4,'E':5,'F':6,'G':7}})
df_train["grade"]=df_train["grade"].astype(int)
df_test["grade"]=df_test["grade"].astype(int )<categorify> | print("Done.;.............. ")
| Home Credit Default Risk |
1,189,554 | df_train=df_train.replace({'sub_grade':{'A1':1,'A2':2,'A3':3,'A4':4,'A5':5,
'B1':6,'B2':7,'B3':8,'B4':9,'B5':10,
'C1':11,'C2':12,'C3':13,'C4':14,'C5':15,
'D1':16,'D2':17,'D3':18,'D4':19,'D5':20,
'E1':21,'E2':22,'E3':23,'E4':24,'E5':25,
'F1':26,'F2':27,'F3':28,'F4':29,'F5':30,
'G1':31,'G2':32,'G3':33,'G4':34,'G5':35}})
df_test=df_test.replace({'sub_grade':{'A1':1,'A2':2,'A3':3,'A4':4,'A5':5,
'B1':6,'B2':7,'B3':8,'B4':9,'B5':10,
'C1':11,'C2':12,'C3':13,'C4':14,'C5':15,
'D1':16,'D2':17,'D3':18,'D4':19,'D5':20,
'E1':21,'E2':22,'E3':23,'E4':24,'E5':25,
'F1':26,'F2':27,'F3':28,'F4':29,'F5':30,
'G1':31,'G2':32,'G3':33,'G4':34,'G5':35}})
df_train["sub_grade"]=df_train["sub_grade"].astype(int)
df_test["sub_grade"]=df_test["sub_grade"].astype(int )<feature_engineering> | train_df = df[df['TARGET'].notnull() ]
test_df = df[df['TARGET'].isnull() ] | Home Credit Default Risk |
1,189,554 | in_0=df_train[df_train.loan_condition==0].installment.median()
df_train['in_0_sa'] =df_train['installment']-in_0
df_test['in_0_sa'] =df_test['installment']-in_0
lo_0=df_train[df_train.loan_condition==0].loan_amnt.median()
df_train['lo_0_sa'] =df_train['loan_amnt']-lo_0
df_test['lo_0_sa'] =df_test['loan_amnt']-lo_0
dti_0=df_train[df_train.loan_condition==0].dti.median()
df_train['dti_0_sa'] =df_train['dti']-dti_0
df_test['dti_0_sa'] =df_test['dti']-dti_0
tot_0=df_train[df_train.loan_condition==0].tot_cur_bal.median()
df_train['tot_0_sa'] =df_train['tot_cur_bal']-tot_0
df_test['tot_0_sa'] =df_test['tot_cur_bal']-tot_0
rev_0=df_train[df_train.loan_condition==0].revol_bal.median()
df_train['rev_0_sa'] =df_train['revol_bal']-rev_0
df_test['rev_0_sa'] =df_test['revol_bal']-rev_0
pe_0=df_train[df_train.loan_condition==0].period.median()
df_train['pe_0_sa'] =df_train['period']-pe_0
df_test['pe_0_sa'] =df_test['period']-pe_0
in_1=df_train[df_train.loan_condition==1].installment.median()
df_train['in_1_sa'] =df_train['installment']-in_1
df_test['in_1_sa'] =df_test['installment']-in_1
lo_1=df_train[df_train.loan_condition==1].loan_amnt.median()
df_train['lo_1_sa'] =df_train['loan_amnt']-lo_1
df_test['lo_1_sa'] =df_test['loan_amnt']-lo_1
dti_1=df_train[df_train.loan_condition==1].dti.median()
df_train['dti_1_sa'] =df_train['dti']-dti_1
df_test['dti_1_sa'] =df_test['dti']-dti_1
tot_1=df_train[df_train.loan_condition==1].tot_cur_bal.median()
df_train['tot_1_sa'] =df_train['tot_cur_bal']-tot_1
df_test['tot_1_sa'] =df_test['tot_cur_bal']-tot_1
rev_1=df_train[df_train.loan_condition==1].revol_bal.median()
df_train['rev_1_sa'] =df_train['revol_bal']-rev_1
df_test['rev_1_sa'] =df_test['revol_bal']-rev_1
pe_1=df_train[df_train.loan_condition==1].period.median()
df_train['pe_0_sa'] =df_train['period']-pe_1
df_test['pe_0_sa'] =df_test['period']-pe_1
<data_type_conversions> | train_df = train_df.drop(['index'],axis=1)
test_df = test_df.drop(['index','TARGET'],axis=1)
train_df = train_df.fillna(0)
test_df = test_df.fillna(0 ) | Home Credit Default Risk |
1,189,554 | df_train['home_ownership'].unique()
df_train=df_train.replace({'home_ownership':{'MORTGAGE':3,'RENT':2,'OWN':4,'ANY':1}})
df_test=df_test.replace({'home_ownership':{'MORTGAGE':3,'RENT':2,'OWN':4,'ANY':1}})
df_train["home_ownership"]=df_train["home_ownership"].astype(int)
df_test["home_ownership"]=df_test["home_ownership"].astype(int)
print(len(df_train.columns))
print(df_test.columns )<categorify> | label = u'TARGET'
a = list(train_df.columns)
a.remove(label)
labels = train_df[label]
data_only = train_df[list(a)]
col_name = data_only.columns
X_train, X_test, y_train, y_test = train_test_split(data_only, labels, test_size=0.1,random_state = 42 ) | Home Credit Default Risk |
1,189,554 | summary = df_train['purpose'].value_counts()
summary
df_train['purpose_co'] = df_train['purpose'].map(summary)
df_test['purpose_co'] = df_test['purpose'].map(summary )<categorify> | clf_xgBoost = xgb.XGBClassifier(
learning_rate =0.01, n_estimators=1000, max_depth=4, min_child_weight=4, subsample=0.8, colsample_bytree=0.8, objective= 'binary:logistic',
nthread=4, scale_pos_weight=2, seed=27)
clf_xgBoost.fit(data_only,labels ) | Home Credit Default Risk |
1,189,554 | df_train=df_train.replace({'emp_length':{'< 1 year':0.5,'1 year':1,'2 years':2,'3 years':3,
'4 years':4,'5 years':5,'6 years':6,'7 years':7,
'8 years':8,'9 years':9,'10+ years':10}})
df_test=df_test.replace({'emp_length':{'< 1 year':0.5,'1 year':1,'2 years':2,'3 years':3,
'4 years':4,'5 years':5,'6 years':6,'7 years':7,
'8 years':8,'9 years':9,'10+ years':10}})
df_train["emp_length"].head()<feature_engineering> | pred = clf_xgBoost.predict_proba(test_df)
test_df['TARGET'] = pred[:, 0] | Home Credit Default Risk |
1,189,554 | <feature_engineering><EOS> | test_df[['SK_ID_CURR', 'TARGET']].to_csv('submission_clf_xgBoost.csv', index= False ) | Home Credit Default Risk |
1,087,344 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<feature_engineering> | plt.style.use('fivethirtyeight')
%matplotlib inline
init_notebook_mode(connected=True)
print(os.listdir(".. /input"))
PATH = ".. /input"
| Home Credit Default Risk |
1,087,344 | df_train['ggg']=round(df_train['loan_amnt']*df_train['sub_grade'],5)
df_test['ggg']=round(df_test['loan_amnt']*df_test['sub_grade'],5)
df_train['hhh']=round(df_train['installment']*df_train['sub_grade'],5)
df_test['hhh']=round(df_test['installment']*df_test['sub_grade'],5)
df_train['iii']=round(df_train['annual_inc']*df_train['sub_grade'],5)
df_test['iii']=round(df_test['annual_inc']*df_test['sub_grade'],5)
df_train['jjj']=round(df_train['dti']*df_train['sub_grade'],5)
df_test['jjj']=round(df_test['dti']*df_test['sub_grade'],5)
df_train['kkk']=round(df_train['open_acc']*df_train['sub_grade'],5)
df_test['kkk']=round(df_test['open_acc']*df_test['sub_grade'],5)
df_train['lll']=round(df_train['revol_bal']*df_train['sub_grade'],5)
df_test['lll']=round(df_test['revol_bal']*df_test['sub_grade'],5)
df_train['mmm']=round(df_train['revol_util']*df_train['sub_grade'],5)
df_test['mmm']=round(df_test['revol_util']*df_test['sub_grade'],5)
df_train['nnn']=round(df_train['total_acc']*df_train['sub_grade'],5)
df_test['nnn']=round(df_test['total_acc']*df_test['sub_grade'],5)
df_train['ooo']=round(df_train['tot_cur_bal']*df_train['sub_grade'],5)
df_test['ooo']=round(df_test['tot_cur_bal']*df_test['sub_grade'],5 )<normalization> | data = pd.read_csv(PATH+"/application_train.csv")
test = pd.read_csv(PATH+"/application_test.csv")
bureau = pd.read_csv(PATH+"/bureau.csv")
bureau_balance = pd.read_csv(PATH+"/bureau_balance.csv")
credit_card_balance = pd.read_csv(PATH+"/credit_card_balance.csv")
installments_payments = pd.read_csv(PATH+"/installments_payments.csv")
previous_application = pd.read_csv(PATH+"/previous_application.csv")
POS_CASH_balance = pd.read_csv(PATH+"/POS_CASH_balance.csv" ) | Home Credit Default Risk |
1,087,344 | df_train[df_train.loan_condition==1].loan_amnt.mean()<normalization> | data = pd.read_csv(PATH+"/application_train.csv", nrows=10000)
test = pd.read_csv(PATH+"/application_test.csv", nrows=10000)
bureau = pd.read_csv(PATH+"/bureau.csv", nrows=10000)
bureau_balance = pd.read_csv(PATH+"/bureau_balance.csv", nrows=10000)
credit_card_balance = pd.read_csv(PATH+"/credit_card_balance.csv", nrows=10000)
installments_payments = pd.read_csv(PATH+"/installments_payments.csv", nrows=10000)
previous_application = pd.read_csv(PATH+"/previous_application.csv", nrows=10000)
POS_CASH_balance = pd.read_csv(PATH+"/POS_CASH_balance.csv", nrows=10000 ) | Home Credit Default Risk |
1,087,344 | df_train[df_train.loan_condition==0].loan_amnt.mean()<count_values> | data['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)
data['CODE_GENDER'].replace({'XNA': 'F'}, inplace=True)
data['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)
data['YEARS_BUILD_CREDIT'] = data['AMT_CREDIT']/data['YEARS_BUILD_AVG']
data['Annuity_Income'] = data['AMT_ANNUITY']/data['AMT_INCOME_TOTAL']
data['Income_Cred'] = data['AMT_CREDIT']/data['AMT_INCOME_TOTAL']
data['EMP_AGE'] = data['DAYS_EMPLOYED']/data['DAYS_BIRTH']
data['Income_PP'] = data['AMT_INCOME_TOTAL']/data['CNT_FAM_MEMBERS']
data['CHILDREN_RATIO'] =(1 + data['CNT_CHILDREN'])/ data['CNT_FAM_MEMBERS']
data['PAYMENTS'] = data['AMT_ANNUITY']/ data['AMT_CREDIT']
data['NEW_CREDIT_TO_GOODS_RATIO'] = data['AMT_CREDIT'] / data['AMT_GOODS_PRICE']
data['GOODS_INCOME'] = data['AMT_GOODS_PRICE']/data['AMT_INCOME_TOTAL']
data['Ext_source_mult'] = data['EXT_SOURCE_1'] * data['EXT_SOURCE_2'] * data['EXT_SOURCE_3']
data['Ext_SOURCE_MEAN'] = data[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis = 1)
data['Ext_SOURCE_SD'] = data[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis = 1)
columns = ['Annuity_Income', 'Income_Cred', 'EMP_AGE', 'Income_PP']
test['CODE_GENDER'].replace({'XNA': 'F'}, inplace=True)
test['YEARS_BUILD_CREDIT'] = test['AMT_CREDIT']/test['YEARS_BUILD_AVG']
test['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)
test['Annuity_Income'] = test['AMT_ANNUITY']/test['AMT_INCOME_TOTAL']
test['Income_Cred'] = test['AMT_CREDIT']/test['AMT_INCOME_TOTAL']
test['EMP_AGE'] = test['DAYS_EMPLOYED']/test['DAYS_BIRTH']
test['Income_PP'] = test['AMT_INCOME_TOTAL']/test['CNT_FAM_MEMBERS']
test['CHILDREN_RATIO'] =(1 + test['CNT_CHILDREN'])/ test['CNT_FAM_MEMBERS']
test['PAYMENTS'] = test['AMT_ANNUITY']/ test['AMT_CREDIT']
test['NEW_CREDIT_TO_GOODS_RATIO'] = test['AMT_CREDIT'] / test['AMT_GOODS_PRICE']
test['GOODS_INCOME'] = test['AMT_GOODS_PRICE']/test['AMT_INCOME_TOTAL']
test['Ext_source_mult'] = test['EXT_SOURCE_1'] * test['EXT_SOURCE_2'] * test['EXT_SOURCE_3']
test['Ext_SOURCE_MEAN'] = test[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis = 1)
test['Ext_SOURCE_SD'] = test[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis = 1 ) | Home Credit Default Risk |
1,087,344 | f = 'purpose'
df_train[f].value_counts() / len(df_train )<count_values> | bureau_new = bureau
group = bureau_new[['SK_ID_CURR', 'DAYS_CREDIT']].groupby('SK_ID_CURR')['DAYS_CREDIT'].count().reset_index().rename(index=str, columns={'DAYS_CREDIT': 'BUREAU_LOAN_COUNT'})
bureau_new = bureau_new.merge(group, how = 'left', on = 'SK_ID_CURR')
bureau_new.head()
del group | Home Credit Default Risk |
1,087,344 | df_test[f].value_counts() / len(df_test )<count_unique_values> | group = bureau_new[['SK_ID_CURR', 'CREDIT_TYPE']].groupby('SK_ID_CURR')['CREDIT_TYPE'].nunique().reset_index().rename(index=str, columns = {'CREDIT_TYPE': 'LOAN_TYPES_PER_CUST'})
bureau_new = bureau_new.merge(group,on = ['SK_ID_CURR'], how = 'left')
bureau_new.head()
del group | Home Credit Default Risk |
1,087,344 | cats = []
for col in df_train.columns:
if df_train[col].dtype == 'object':
cats.append(col)
print(col, df_train[col].nunique())
print(cats )<count_values> | bureau_new["AVERAGE_LOAN_TYPE"] = bureau_new['BUREAU_LOAN_COUNT']/bureau_new['LOAN_TYPES_PER_CUST'] | Home Credit Default Risk |
1,087,344 | print(df_train['title'].unique())
print(len(df_test.columns))
print(len(df_train.columns))<categorify> | replace = {'Active': 1, 'Closed':0, 'Sold': 1, 'Bad debt': 1}
bureau_new['CREDIT_ACTIVE'] = bureau_new['CREDIT_ACTIVE'].replace(replace)
gp = bureau_new.groupby('SK_ID_CURR')['CREDIT_ACTIVE'].mean().reset_index().rename(index=str, columns={'CREDIT_ACTIVE': 'ACTIVE_LOANS_PERCENTAGE'})
bureau_new = bureau_new.merge(gp, on = 'SK_ID_CURR', how = 'left')
bureau_new.head()
del gp | Home Credit Default Risk |
1,087,344 | encoder = OrdinalEncoder()
enc_train = encoder.fit_transform(df_train['emp_title'].values)
enc_test = encoder.transform(df_test['emp_title'].values)
df_train = df_train.reset_index()
df_test = df_test.reset_index()
df_train['emp_title_lab']=enc_train.iloc[:,0]
df_test['emp_title_lab']=enc_test.iloc[:,0]
df_train = df_train.set_index("ID")
df_test =df_test.set_index("ID" )<categorify> | def repl(x):
if x < 0:
y = 0
else:
y= 1
return y
bureau_new['CREDIT_ENDDATE_BINARY'] = bureau_new['DAYS_CREDIT_ENDDATE'].apply(lambda x: repl(x))
grp = bureau_new.groupby('SK_ID_CURR')['CREDIT_ENDDATE_BINARY'].mean().reset_index().rename(index=str, columns={'CREDIT_ENDDATE_BINARY': 'CREDIT_ENDDATE_PERCENTAGE'})
bureau_new = bureau_new.merge(grp, on = 'SK_ID_CURR', how = 'left')
del grp | Home Credit Default Risk |
1,087,344 | ce_cal1='emp_title'
ce_summary1 = df_train[ce_cal1].value_counts()
df_train['emp_title_co'] = df_train[ce_cal1].map(ce_summary1)
df_test['emp_title_co'] = df_test[ce_cal1].map(ce_summary1 )<categorify> | num_aggregations = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
'DAYS_CREDIT_UPDATE': ['mean'],
'CREDIT_DAY_OVERDUE': ['max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'AMT_ANNUITY': ['max', 'mean'],
'CNT_CREDIT_PROLONG': ['sum'],
}
bureau_agg = bureau_new.groupby('SK_ID_CURR' ).agg({**num_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist() ])
bureau_agg.reset_index(inplace=True)
bureau_merge = bureau_new.merge(bureau_agg, on = 'SK_ID_CURR', how = 'left')
del bureau_agg | Home Credit Default Risk |
1,087,344 | encoder = OrdinalEncoder()
enc_train = encoder.fit_transform(df_train['title'].values)
enc_test = encoder.transform(df_test['title'].values)
df_train = df_train.reset_index()
df_test = df_test.reset_index()
df_train['title_la']=enc_train.iloc[:,0]
df_test['title_la']=enc_test.iloc[:,0]
df_train = df_train.set_index("ID")
df_test =df_test.set_index("ID")
<categorify> | buro_cat_features = [bcol for bcol in bureau_merge.columns if bureau_merge[bcol].dtype == 'object']
buro = pd.get_dummies(bureau_merge, columns=buro_cat_features)
cat_columns = [col for col in bureau_balance.columns if bureau_balance[col].dtype == 'object']
bureau_balance = pd.get_dummies(bureau_balance,cat_columns, dummy_na = True)
bb_group = bureau_balance.groupby('SK_ID_BUREAU' ).agg(['min', 'max', 'mean'])
bb_group.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_group.columns.tolist() ])
bb_group.reset_index(inplace=True)
buro = buro.merge(bb_group, on = 'SK_ID_BUREAU', how = 'left')
avg_buro = buro.groupby('SK_ID_CURR' ).mean()
avg_buro['buro_count'] = buro[['SK_ID_BUREAU', 'SK_ID_CURR']].groupby('SK_ID_CURR' ).count() ['SK_ID_BUREAU']
del avg_buro['SK_ID_BUREAU'], bb_group | Home Credit Default Risk |
1,087,344 | ce_cal2='title'
ce_summary2 = df_train[ce_cal2].value_counts()
df_train['title_co'] = df_train[ce_cal2].map(ce_summary2)
df_test['title_co'] = df_test[ce_cal2].map(ce_summary2 )<feature_engineering> | cat_columns = [col for col in installments_payments.columns if installments_payments[col].dtype == 'object']
installments_payments = pd.get_dummies(installments_payments,cat_columns, dummy_na = True)
installments_payments['AMOUNT_DIFF'] = installments_payments['AMT_INSTALMENT'] - installments_payments['AMT_PAYMENT']
installments_payments['AMOUNT_PERC'] = installments_payments['AMT_PAYMENT']/installments_payments['AMT_INSTALMENT']
installments_payments['DAYS_P'] = installments_payments['DAYS_ENTRY_PAYMENT']-installments_payments['DAYS_INSTALMENT']
installments_payments['DAYS_I'] = installments_payments['DAYS_INSTALMENT']-installments_payments['DAYS_ENTRY_PAYMENT']
aggregations = {
'NUM_INSTALMENT_VERSION': ['nunique'],
'DAYS_P': ['max', 'mean', 'sum'],
'DAYS_I': ['max', 'mean', 'sum'],
'AMOUNT_DIFF': ['max', 'mean', 'sum', 'var'],
'AMOUNT_PERC': ['max', 'mean', 'sum', 'var'],
'AMT_INSTALMENT': ['max', 'mean', 'sum'],
'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']
}
for cat in cat_columns:
aggregations[cat] = ['mean']
installments_payments_agg = installments_payments.groupby('SK_ID_CURR' ).agg(aggregations)
installments_payments_agg['INSTAL_COUNT'] = installments_payments.groupby('SK_ID_CURR' ).size()
installments_payments_agg.columns = pd.Index(['INSTALL_' + e[0] + "_" + e[1].upper() for e in installments_payments_agg.columns.tolist() ])
installments_payments = installments_payments.merge(installments_payments_agg, how = 'left', on = 'SK_ID_CURR')
del installments_payments_agg | Home Credit Default Risk |
1,087,344 | df_train['NaN']=df_train.isnull().sum(axis=1)
df_test['NaN']=df_test.isnull().sum(axis=1)
df_train['NaN']=df_train["NaN"].fillna(0)
df_test['NaN']=df_test["NaN"].fillna(0)
<drop_column> | previous_application['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
previous_application['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
previous_application['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
previous_application['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
previous_application['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
previous_application['INTEREST_PERC'] =(previous_application['RATE_INTEREST_PRIMARY']/100)*previous_application['AMT_DOWN_PAYMENT']
previous_application['INTEREST_ANN_PERC'] =(previous_application['RATE_INTEREST_PRIMARY']/100)*previous_application['AMT_ANNUITY']
previous_application['INTEREST_CREDIT_PERC'] =(previous_application['RATE_INTEREST_PRIMARY']/100)*previous_application['AMT_CREDIT']
previous_application['FIRST_LAST'] = previous_application['DAYS_FIRST_DUE'] - previous_application['DAYS_LAST_DUE']
previous_application['APPLICATION_ACTUAL_CREDIT'] = previous_application['AMT_APPLICATION']/previous_application['AMT_CREDIT']
num_aggregations = {
'AMT_ANNUITY': ['min', 'max', 'mean'],
'AMT_APPLICATION': ['min', 'max', 'mean'],
'AMT_CREDIT': ['min', 'max', 'mean'],
'INTEREST_CREDIT_PERC': ['min', 'max', 'mean', 'var'],
'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],
'AMT_GOODS_PRICE': ['min', 'max', 'mean'],
'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
'FIRST_LAST': ['mean', 'max', 'min']
}
prev_agg = previous_application.groupby('SK_ID_CURR' ).agg({**num_aggregations})
prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist() ])
previous_application = previous_application.merge(prev_agg, on = 'SK_ID_CURR', how = 'left')
del prev_agg | Home Credit Default Risk |
1,087,344 |
df_train=df_train.drop("pub_rec",axis=1)
df_test=df_test.drop("pub_rec",axis=1)
df_train=df_train.drop("annual_inc",axis=1)
df_test=df_test.drop("annual_inc",axis=1)
<prepare_x_and_y> | approved = previous_application[previous_application['NAME_CONTRACT_STATUS'] == 'Approved']
approved_agg = approved.groupby('SK_ID_CURR' ).agg(num_aggregations)
approved_agg.columns = pd.Index(['APPROVED_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist() ])
previous_application = previous_application.join(approved_agg, how='left', on='SK_ID_CURR')
refused = previous_application[previous_application['NAME_CONTRACT_STATUS'] == 'Refused']
refused_agg = refused.groupby('SK_ID_CURR' ).agg(num_aggregations)
refused_agg.columns = pd.Index(['REFUSED_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist() ])
previous_application = previous_application.join(refused_agg, how='left', on='SK_ID_CURR')
previous_application = previous_application.groupby('SK_ID_CURR' ).mean().reset_index(inplace=True)
| Home Credit Default Risk |
1,087,344 | y_train = df_train.loan_condition
X_train = df_train.drop(['loan_condition'], axis=1)
X_test = df_test
<split> | aggregations = {
'MONTHS_BALANCE': ['max', 'mean', 'size'],
'SK_DPD': ['max', 'mean'],
'SK_DPD_DEF': ['max', 'mean']
}
POS_CASH_AGG = POS_CASH_balance.groupby('SK_ID_CURR' ).agg(aggregations)
POS_CASH_AGG.columns = pd.Index(['POS_CASH_' + e[0] + "_" + e[1].upper() for e in POS_CASH_AGG.columns.tolist() ])
POS_CASH_AGG['COUNT'] = POS_CASH_AGG.groupby('SK_ID_CURR' ).size()
cat_columns = [col for col in POS_CASH_balance.columns if POS_CASH_balance[col].dtype == 'object']
POS_CASH_balance = pd.get_dummies(POS_CASH_balance,cat_columns, dummy_na = True)
POS_CASH_balance = POS_CASH_balance.merge(POS_CASH_AGG, how = 'left', on = 'SK_ID_CURR')
POS_CASH_balance.head()
POS_CASH_balance = POS_CASH_balance.groupby('SK_ID_CURR' ).mean().reset_index()
del POS_CASH_AGG, POS_CASH_balance['SK_ID_PREV'] | Home Credit Default Risk |
1,087,344 | col='title'
target = 'loan_condition'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
enc_test = X_test[col].map(summary)
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train['title']=enc_train
X_test['title']=enc_test<categorify> | y = data['TARGET']
del data['TARGET']
categorical_features = [col for col in data.columns if data[col].dtype == 'object']
one_hot_df = pd.concat([data,test])
one_hot_df = pd.get_dummies(one_hot_df, columns=categorical_features)
data = one_hot_df.iloc[:data.shape[0],:]
test = one_hot_df.iloc[data.shape[0]:,]
print(data.shape, test.shape ) | Home Credit Default Risk |
1,087,344 | col='emp_title'
target = 'loan_condition'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
enc_test = X_test[col].map(summary)
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train['emp_title']=enc_train
X_test['emp_title']=enc_test<categorify> | data = data.merge(right=avg_buro.reset_index() , how='left', on='SK_ID_CURR')
test = test.merge(right=avg_buro.reset_index() , how='left', on='SK_ID_CURR')
print(data.shape, test.shape)
data = data.merge(right=previous_application.reset_index() , how='left', on='SK_ID_CURR')
test = test.merge(right=previous_application.reset_index() , how='left', on='SK_ID_CURR')
print(data.shape, test.shape)
data = data.merge(right=POS_CASH_balance.reset_index() , how='left', on='SK_ID_CURR')
test = test.merge(right=POS_CASH_balance.reset_index() , how='left', on='SK_ID_CURR')
print(data.shape, test.shape)
data = data.merge(right=installments_payments.reset_index() , how='left', on='SK_ID_CURR')
test = test.merge(right=installments_payments.reset_index() , how='left', on='SK_ID_CURR')
print(data.shape, test.shape)
gc.collect() | Home Credit Default Risk |
1,087,344 | col='zip_code'
target = 'loan_condition'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
enc_test = X_test[col].map(summary)
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train['zip_code']=enc_train
X_test['zip_code']=enc_test<categorify> | print('Removing features with more than 80% missing...')
test = test[test.columns[data.isnull().mean() < 0.80]]
data = data[data.columns[data.isnull().mean() < 0.80]]
print(data.shape, test.shape ) | Home Credit Default Risk |
1,087,344 | <split><EOS> | gc.enable()
folds = KFold(n_splits=4, shuffle=True, random_state=546789)
oof_preds = np.zeros(data.shape[0])
sub_preds = np.zeros(test.shape[0])
feature_importance_df = pd.DataFrame()
feats = [f for f in data.columns if f not in ['SK_ID_CURR']]
for n_fold,(trn_idx, val_idx)in enumerate(folds.split(data)) :
trn_x, trn_y = data[feats].iloc[trn_idx], y.iloc[trn_idx]
val_x, val_y = data[feats].iloc[val_idx], y.iloc[val_idx]
clf = LGBMClassifier(
n_estimators=10000,
learning_rate=0.03,
num_leaves=34,
colsample_bytree=0.9,
subsample=0.8,
max_depth=8,
reg_alpha=.1,
reg_lambda=.1,
min_split_gain=.01,
min_child_weight=300,
silent=-1,
verbose=-1,
)
clf.fit(trn_x, trn_y,
eval_set= [(trn_x, trn_y),(val_x, val_y)],
eval_metric='auc', verbose=100, early_stopping_rounds=100
)
oof_preds[val_idx] = clf.predict_proba(val_x, num_iteration=clf.best_iteration_)[:, 1]
sub_preds += clf.predict_proba(test[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
print('Fold %2d AUC : %.6f' %(n_fold + 1, roc_auc_score(val_y, oof_preds[val_idx])))
del clf, trn_x, trn_y, val_x, val_y
gc.collect()
print('Full AUC score %.6f' % roc_auc_score(y, oof_preds))
test['TARGET'] = sub_preds
test[['SK_ID_CURR', 'TARGET']].to_csv('submission1LGBM.csv', index=False)
cols = feature_importance_df[["feature", "importance"]].groupby("feature" ).mean().sort_values(
by="importance", ascending=False)[:50].index
best_features = feature_importance_df.loc[feature_importance_df.feature.isin(cols)]; | Home Credit Default Risk |
1,154,375 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<categorify> | from fastai.imports import *
from fastai.structured import *
from fastai.column_data import *
from torch.nn import functional as F
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split | Home Credit Default Risk |
1,154,375 | col='application_type'
target = 'loan_condition'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
enc_test = X_test[col].map(summary)
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train['application_type_ta']=enc_train
X_test['application_type_ta']=enc_test
<count_missing_values> | df_train = pd.read_feather('.. /input/home-credit-data-processing-for-neural-networks/tables_merged_train')
df_test = pd.read_feather('.. /input/home-credit-data-processing-for-neural-networks/tables_merged_test' ) | Home Credit Default Risk |
1,154,375 | X_train['Yaxis'].isnull().sum()<categorify> | df_train.dtypes.value_counts() | Home Credit Default Risk |
1,154,375 | X_train['Yaxis']=X_train['Yaxis'].replace([np.inf, -np.inf,np.nan], -9999)
X_test['Yaxis']=X_test['Yaxis'].replace([np.inf, -np.inf,np.nan], -9999)
X_train['Yaxis'].astype(str)
X_test['Yaxis'].astype(str)
col='Yaxis'
target = 'loan_condition'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
enc_test = X_test[col].map(summary)
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train['Yaxis_ta']=enc_train
X_test['Yaxis_ta']=enc_test
X_train['Yaxis'].astype(int)
X_test['Yaxis'].astype(int )<data_type_conversions> | cat_vars = [col for col in df_train if df_train[col].dtype.name != 'float64' and df_train[col].dtype.name != 'float32' and len(df_train[col].unique())< 150]
cat_vars.remove('TARGET' ) | Home Credit Default Risk |
1,154,375 | X_train=X_train.replace([np.inf, -np.inf,np.nan], -9999)
X_test=X_test.replace([np.inf, -np.inf,np.nan], -9999)
<split> | cat_sz = [(c, len(df_train[c].unique())+1)for c in cat_vars] | Home Credit Default Risk |
1,154,375 | scores = []
y_pred_test=np.zeros(len(X_test))
df = pd.DataFrame(index=[], columns=[])
df['feature']=X_train.columns
n=10
for i in range(n):
X_train_,X_val,y_train_,y_val=train_test_split(X_train,y_train,test_size=0.3,random_state=i*10)
clf = LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1,
importance_type='split', learning_rate=0.05, max_depth=-1,
min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0,
n_estimators=100, n_jobs=-1, num_leaves=50, objective=None,
random_state=None, reg_alpha=0.0, reg_lambda=0.0, silent=True,
subsample=1.0, subsample_for_bin=200000, subsample_freq=0)
clf.fit(X_train_, y_train_, early_stopping_rounds=200, eval_metric='auc', eval_set=[(X_val, y_val)])
y_pred = clf.predict_proba(X_val)[:,1]
score = roc_auc_score(y_val, y_pred)
scores.append(score)
df[i]=Series(clf.booster_.feature_importance(importance_type='gain'))
y_pred_test+=clf.predict_proba(X_test)[:,1]
df['ave']=df.mean(axis=1)
df['std']=df.std(axis=1)
df=df.sort_values('ave',ascending=False)
ykai=y_pred_test/n
<train_model> | y = np.array(df_train['TARGET'])
df_train.drop('TARGET', axis = 1, inplace=True)
df_to_nn_train, df_to_nn_valid, y_train, y_valid = train_test_split(df_train, y, test_size=0.33, random_state=23, stratify = y ) | Home Credit Default Risk |
1,154,375 | scores_xg=[]
y_pred_test_xg=np.zeros(len(X_test))
df_xg = pd.DataFrame(index=[], columns=[])
df_xg['feature']=X_train.columns
n=10
for i in range(n):
X_train_,X_val,y_train_,y_val=train_test_split(X_train,y_train,test_size=0.3,random_state=i*10)
xg=xgb.XGBClassifier()
xg.fit(X_train_, y_train_,early_stopping_rounds=100, eval_metric='auc', eval_set=[(X_val, y_val)])
y_pred_xg = xg.predict_proba(X_val)[:,1]
score_xg = roc_auc_score(y_val, y_pred_xg)
print(score_xg)
scores_xg.append(score_xg)
y_pred_test_xg+=xg.predict_proba(X_test)[:,1]
ykai_xg=y_pred_test_xg/n
<train_model> | def preprocess_fast_ai(df_to_nn_train, df_to_nn_valid, cat_vars):
for v in cat_vars: df_to_nn_train[v] = df_to_nn_train[v].astype('category' ).cat.as_ordered()
apply_cats(df_to_nn_valid, df_to_nn_train)
df, _, nas, mapper = proc_df(df_to_nn_train, do_scale=True, skip_flds=['SK_ID_CURR'])
df_valid, _, nas, mapper = proc_df(df_to_nn_valid, do_scale=True, na_dict=nas, mapper=mapper, skip_flds=['SK_ID_CURR'])
return df, df_valid | Home Credit Default Risk |
1,154,375 | scores_cb = []
y_pred_test_cb=np.zeros(len(X_test))
df_cb = pd.DataFrame(index=[], columns=[])
df_cb['feature']=X_train.columns
n=10
for i in range(n):
X_train_,X_val,y_train_,y_val=train_test_split(X_train,y_train,test_size=0.3,random_state=i*10)
cb = catboost.CatBoostClassifier(eval_metric='AUC')
cb.fit(X_train_, y_train_, early_stopping_rounds=200,eval_set=[(X_val, y_val)])
y_pred_cb = cb.predict_proba(X_val)[:,1]
y_pred_test_cb+=cb.predict_proba(X_test)[:,1]
ykai_cb=y_pred_test_cb/n<find_best_model_class> | %time df, df_valid = preprocess_fast_ai(df_to_nn_train, df_to_nn_valid, cat_vars ) | Home Credit Default Risk |
1,154,375 |
<define_variables> | emb_szs = [(c, min(50,(c+1)//2)) for _,c in cat_sz] | Home Credit Default Risk |
1,154,375 | y_pred=(ykai+ykai_xg+ykai_cb)/3
<save_to_csv> | md = ColumnarModelData.from_data_frames('', trn_df = df, val_df = df_valid,
trn_y = y_train.astype('int'), val_y = y_valid.astype('int'),
cat_flds=cat_vars, bs=512, is_reg= False ) | Home Credit Default Risk |
1,154,375 | submission = pd.read_csv('.. /input/homework-for-students3/sample_submission.csv', index_col=0)
submission.loan_condition = y_pred
submission.to_csv('submission.csv' )<load_from_csv> | class MixedInputModel(nn.Module):
def __init__(self, emb_szs, n_cont, emb_drop, out_sz, szs, drops,
y_range=None, use_bn=False, is_reg=True, is_multi=False):
super().__init__()
self.embs = nn.ModuleList([nn.Embedding(c, s)for c,s in emb_szs])
for emb in self.embs: emb_init(emb)
n_emb = sum(e.embedding_dim for e in self.embs)
self.n_emb, self.n_cont= n_emb, n_cont
szs = [n_emb + n_cont] + szs
self.lins = nn.ModuleList([
nn.Linear(szs[i], szs[i+1])for i in range(len(szs)-1)])
self.bns = nn.ModuleList([
nn.BatchNorm1d(sz)for sz in szs[1:]])
for o in self.lins: kaiming_normal(o.weight.data)
self.outp = nn.Linear(szs[-1], out_sz)
kaiming_normal(self.outp.weight.data)
self.emb_drop = nn.Dropout(emb_drop)
self.drops = nn.ModuleList([nn.Dropout(drop)for drop in drops])
self.bn = nn.BatchNorm1d(n_cont)
self.use_bn,self.y_range = use_bn,y_range
self.is_reg = is_reg
self.is_multi = is_multi
def forward(self, x_cat, x_cont):
x = []
for i,e in enumerate(self.embs):
x.append(e(x_cat[:,i]))
x = torch.cat(x, 1)
x = self.emb_drop(x)
x2 = self.bn(x_cont)
x = torch.cat([x, x2], 1)
for l,d,b in zip(self.lins, self.drops, self.bns):
x = F.relu(l(x))
if self.use_bn: x = b(x)
x = d(x)
x = self.outp(x)
x = F.log_softmax(x)
return x | Home Credit Default Risk |
1,154,375 | df_train = pd.read_csv('.. /input/train.csv')
df_valid = pd.read_csv('.. /input/valid.csv')
df_sample_submission = pd.read_csv('.. /input/sample_submission.csv' )<drop_column> | m = MixedInputModel(emb_szs, n_cont = len(df.columns)-len(cat_vars),
emb_drop = 0.05, out_sz = 2, szs = [500, 250, 250], drops = [0.1, 0.1, 0.1],
y_range = None, use_bn = False, is_reg = False, is_multi = False)
bm = BasicModel(m.cuda() , 'binary_classifier' ) | Home Credit Default Risk |
1,154,375 | df_train.drop(['article_link'], axis=1)
df_valid.drop(['article_link'], axis=1 )<import_modules> | class StructuredLearner(Learner):
def __init__(self, data, models, **kwargs):
super().__init__(data, models, **kwargs)
self.crit = F.nll_loss
learn = StructuredLearner(md, bm ) | Home Credit Default Risk |
1,154,375 | from sklearn.feature_extraction.text import TfidfVectorizer<feature_engineering> | learn.lr_find(1e-4, 1)
learn.sched.plot(100 ) | Home Credit Default Risk |
1,154,375 | TfidfVec = TfidfVectorizer()<create_dataframe> | lr = 1e-1
learn.fit(lr, 3, metrics=[roc_auc_own] ) | Home Credit Default Risk |
1,154,375 | all_headlines = pd.DataFrame()
all_headlines = pd.concat([df_train, df_valid] )<feature_engineering> | logpreds = learn.predict()
preds = np.exp(logpreds[:,1] ) | Home Credit Default Risk |
1,154,375 | Tfidf_vectorized_data = TfidfVec.fit_transform(all_headlines.headline )<split> | print(classification_report(y_valid,
preds_binary,
target_names= ['0', '1'])) | Home Credit Default Risk |
1,154,375 | df_train_vec = Tfidf_vectorized_data[:18696]
df_valid_vec = Tfidf_vectorized_data[18696:]<prepare_x_and_y> | class ColumnarDataset(Dataset):
def __init__(self, cats, conts, y, is_reg, is_multi):
n = len(cats[0])if cats else len(conts[0])
self.cats = np.stack(cats, 1 ).astype(np.int64)if cats else np.zeros(( n,1))
self.conts = np.stack(conts, 1 ).astype(np.float32)if conts else np.zeros(( n,1))
self.y = np.zeros(( n,1)) if y is None else y
if is_reg:
self.y = self.y[:,None]
self.is_reg = is_reg
self.is_multi = is_multi
def __len__(self): return len(self.y)
def __getitem__(self, idx):
return [self.cats[idx], self.conts[idx], self.y[idx]]
@classmethod
def from_data_frames(cls, df_cat, df_cont, y=None, is_reg=True, is_multi=False):
cat_cols = [c.values for n,c in df_cat.items() ]
cont_cols = [c.values for n,c in df_cont.items() ]
return cls(cat_cols, cont_cols, y, is_reg, is_multi)
@classmethod
def from_data_frame(cls, df, cat_flds, y=None, is_reg=False, is_multi=False):
return cls.from_data_frames(df[cat_flds], df.drop(cat_flds, axis=1), y, is_reg, is_multi)
class ColumnarModelData(ModelData):
def __init__(self, path, trn_ds, val_ds, bs, test_ds=None, shuffle=True):
test_dl = DataLoader(test_ds, bs, shuffle=False, num_workers=1)if test_ds is not None else None
super().__init__(path, DataLoader(trn_ds, bs, shuffle=shuffle, num_workers=1),
DataLoader(val_ds, bs*2, shuffle=False, num_workers=1), test_dl)
@classmethod
def from_data_frames(cls, path, trn_df, trn_y, cat_flds, bs, val_df = None, val_y = None, is_reg = False, is_multi = False, test_df=None):
trn_ds = ColumnarDataset.from_data_frame(trn_df, cat_flds, trn_y, is_reg, is_multi)
val_ds = ColumnarDataset.from_data_frame(val_df, cat_flds, val_y, is_reg, is_multi)if val_df is not None else None
test_ds = ColumnarDataset.from_data_frame(test_df, cat_flds, None, is_reg, is_multi)if test_df is not None else None
return cls(path, trn_ds, val_ds, bs, test_ds=test_ds)
@classmethod
def from_data_frame(cls, path, val_idxs, df, y, cat_flds, bs, is_reg=True, is_multi=False, test_df=None):
(( val_df, trn_df),(val_y, trn_y)) = split_by_idx(val_idxs, df, y)
return cls.from_data_frames(path, trn_df, val_df, trn_y, val_y, cat_flds, bs, is_reg, is_multi, test_df=test_df ) | Home Credit Default Risk |
1,154,375 | y_train = df_train.is_sarcastic<import_modules> | train_ids = df_train['SK_ID_CURR']
test_ids = df_test['SK_ID_CURR']
%time train_df, test_df = preprocess_fast_ai(df_train, df_test, cat_vars ) | Home Credit Default Risk |
1,154,375 | from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score<split> | ros = RandomOverSampler()
df_resampled, y_resampled = ros.fit_sample(df, y_train)
df_resampled = pd.DataFrame(df_resampled, columns = df.columns)
y_valid.mean() , y_resampled.mean() | Home Credit Default Risk |
1,154,375 | df_train_1, df_train_2 = train_test_split(df_train_vec, test_size=0.1)
df_y_1, df_y_2 = train_test_split(y_train, test_size=0.1 )<choose_model_class> | md = ColumnarModelData.from_data_frames('', trn_df = df_resampled,
val_df = df_valid, trn_y = y_resampled.astype('int'),
val_y = y_valid.astype('int'), cat_flds=cat_vars, bs=1024, is_reg = False,
test_df = test_df)
class StructuredLearner(Learner):
def __init__(self, data, models, **kwargs):
super().__init__(data, models, **kwargs)
self.crit = F.nll_loss
m = MixedInputModel(emb_szs, n_cont = len(df.columns)-len(cat_vars),
emb_drop = 0.4, out_sz = 2, szs = [1000, 500],
drops = [0.6, 0.6],y_range = None, use_bn = False, is_reg = False)
bm = BasicModel(m.cuda() , 'binary_classifier')
learn = StructuredLearner(md, bm ) | Home Credit Default Risk |
1,154,375 | model = SGDClassifier(n_jobs=-1, loss='hinge', random_state=42 )<predict_on_test> | learn.lr_find(1e-2, 2)
learn.sched.plot(100 ) | Home Credit Default Risk |
1,154,375 | pred = model.predict(df_valid_vec )<save_to_csv> | lr = 0.1
learn.fit(lr, 3, metrics=[roc_auc_own] ) | Home Credit Default Risk |
1,154,375 | my_submission=pd.DataFrame({'ID': df_valid['ID'], 'is_sarcastic': pred})
my_submission.to_csv('fepas_submission_3.csv',index=False )<set_options> | learn.fit(lr, 2, metrics=[roc_auc_own], cycle_len=1, cycle_mult=2 ) | Home Credit Default Risk |
1,154,375 | %matplotlib inline
print(tf.config.experimental.list_physical_devices('CPU'))
print(tf.config.experimental.list_physical_devices('GPU'))
print(tf.__version__ )<load_from_csv> | print(classification_report(y_valid,
preds_binary,
target_names= ['0', '1']))
false_positive_rate, true_positive_rate, threshold = roc_curve(y_valid,
preds_valid ) | Home Credit Default Risk |
1,154,375 | train_data = pd.read_csv('.. /input/bird-or-aircraft-dafe-open/train_x.csv', index_col=0, header=None)
train_labels = pd.read_csv('.. /input/bird-or-aircraft-dafe-open/train_y.csv', index_col=0)
test_data = pd.read_csv('.. /input/bird-or-aircraft-dafe-open/test_x.csv', index_col=0, header=None )<count_values> | logpreds = learn.predict(True)
preds = np.exp(logpreds[:,1])
submission = pd.DataFrame({'SK_ID_CURR': df_test['SK_ID_CURR'],
'TARGET': preds})
submission.to_csv('submission.csv', index=False, float_format='%.8f' ) | Home Credit Default Risk |
1,154,375 | train_labels['target'].value_counts()<data_type_conversions> | m=learn.model
m.cuda() | Home Credit Default Risk |
1,154,375 | train_data = train_data.to_numpy()
test_data = test_data.to_numpy()
train_labels = train_labels.to_numpy()<train_model> | def get_embeddings(embs, dataframe, ids, cat_vars):
embeddings = np.concatenate([to_np(embs[i](V(dataframe[cat_vars[i]])))for i in range(len(embs)) ], axis = 1)
embedding_columns = ["embedding_"+str(i)for i in range(embeddings.shape[1])]
embedding_df = pd.DataFrame(embeddings, columns=embedding_columns)
embedding_df = pd.concat([embedding_df, ids], axis = 1)
return embedding_df | Home Credit Default Risk |
1,154,375 | <choose_model_class><EOS> | train_embeddings = get_embeddings(m.embs, train_df, train_ids, cat_vars)
test_embeddings = get_embeddings(m.embs, test_df, test_ids, cat_vars)
train_embeddings.to_csv('train_embeddings.csv', index=False)
test_embeddings.to_csv('test_embeddings.csv', index=False ) | Home Credit Default Risk |
9,067,638 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<train_model> | warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning ) | Home Credit Default Risk |
9,067,638 | kf = KFold(n_splits=5, shuffle=True, random_state=42)
epochs_num = 150
all_loss = []
all_accuracy = []
i = 0
for train_index, val_index in kf.split(train_data):
i += 1
print('Processing fold
X_train = train_data[train_index]
y_train = train_labels[train_index]
X_val = train_data[val_index]
y_val = train_labels[val_index]
model = build_model()
history = model.fit(X_train, y_train,
epochs=epochs_num,
batch_size=128,
validation_data=(X_val, y_val),
verbose=0)
loss_history = history.history['val_loss']
all_loss.append(loss_history)
accuracy_history = history.history['val_accuracy']
all_accuracy.append(accuracy_history )<train_model> | print('
'.join([''.join([(' I_Love_Data_Science_'[(x-y)% len('I_Love_Data_Science_')] if(( x*0.05)**2+(y*0.1)**2-1)**3-(x*0.05)**2*(y*0.1)**3 <= 0 else ' ')for x in range(-30, 30)])for y in range(15, -15, -1)])) | Home Credit Default Risk |
9,067,638 | epochs_num = 75
X_train = train_data
y_train = train_labels
model = build_model()
model.fit(X_train, y_train, epochs=epochs_num, batch_size=128, verbose=0);<train_model> | def application_train() :
df = pd.read_csv('.. /input/home-credit-default-risk/application_train.csv')
test_df = pd.read_csv('.. /input/home-credit-default-risk/application_test.csv')
df = df.append(test_df ).reset_index()
df = df[df['CODE_GENDER'] != 'XNA']
lbe = LabelEncoder()
for col in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
df[col] = lbe.fit_transform(df[col])
df = pd.get_dummies(df, dummy_na = True)
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace = True)
df['NEW_DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['NEW_INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']
df['NEW_INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['NEW_ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
df['NEW_PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']
df.drop("index", axis = 1, inplace = True)
df.columns = pd.Index(["APP_" + col for col in df.columns.tolist() ])
df.rename(columns={"APP_SK_ID_CURR":"SK_ID_CURR"}, inplace = True)
df.rename(columns={"APP_TARGET":"TARGET"}, inplace = True)
return df | Home Credit Default Risk |
9,067,638 | X_test = test_data / 255
X_test = X_test.reshape(test_data.shape[0], 32, 32, 3 )<predict_on_test> | def bureau_bb() :
bb = pd.read_csv('.. /input/home-credit-default-risk/bureau_balance.csv')
bb = pd.get_dummies(bb, dummy_na = True)
agg_list = {"MONTHS_BALANCE":"count",
"STATUS_0":["sum","mean"],
"STATUS_1":["sum"],
"STATUS_2":["sum"],
"STATUS_3":["sum"],
"STATUS_4":["sum"],
"STATUS_5":["sum"],
"STATUS_C":["sum","mean"],
"STATUS_X":["sum","mean"] }
bb_agg = bb.groupby("SK_ID_BUREAU" ).agg(agg_list)
bb_agg.columns = pd.Index([col[0] + "_" + col[1].upper() for col in bb_agg.columns.tolist() ])
bb_agg['NEW_STATUS_SCORE'] = bb_agg['STATUS_1_SUM'] + bb_agg['STATUS_2_SUM']^2 + bb_agg['STATUS_3_SUM']^3 + bb_agg['STATUS_4_SUM']^4 + bb_agg['STATUS_5_SUM']^5
bb_agg.drop(['STATUS_1_SUM','STATUS_2_SUM','STATUS_3_SUM','STATUS_4_SUM','STATUS_5_SUM'], axis=1,inplace=True)
bureau = pd.read_csv('.. /input/home-credit-default-risk/bureau.csv')
bureau_and_bb = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau_and_bb['CREDIT_TYPE'] = bureau_and_bb['CREDIT_TYPE'].replace(['Car loan',
'Mortgage',
'Microloan',
'Loan for business development',
'Another type of loan',
'Unknown type of loan',
'Loan for working capital replenishment',
"Loan for purchase of shares(margin lending)",
'Cash loan(non-earmarked)',
'Real estate loan',
"Loan for the purchase of equipment",
"Interbank credit",
"Mobile operator loan"], 'Rare')
bureau_and_bb['CREDIT_ACTIVE'] = bureau_and_bb['CREDIT_ACTIVE'].replace(['Bad debt','Sold'], 'Active')
bureau_and_bb = pd.get_dummies(bureau_and_bb, columns = ["CREDIT_TYPE","CREDIT_ACTIVE"])
bureau_and_bb.drop(["SK_ID_BUREAU","CREDIT_CURRENCY"], inplace = True, axis = 1)
bureau_and_bb["NEW_MONTHS_CREDIT"]= round(( bureau_and_bb.DAYS_CREDIT_ENDDATE - bureau_and_bb.DAYS_CREDIT)/30)
agg_list = {
"SK_ID_CURR":["count"],
"DAYS_CREDIT":["min","max"],
"CREDIT_DAY_OVERDUE":["sum","mean","max"],
"DAYS_CREDIT_ENDDATE":["max","min"],
"DAYS_ENDDATE_FACT":["max","min"],
"AMT_CREDIT_MAX_OVERDUE":["mean","max","min"],
"CNT_CREDIT_PROLONG":["sum","mean","max","min"],
"AMT_CREDIT_SUM":["mean","max","min"],
"AMT_CREDIT_SUM_DEBT":["sum","mean","max"],
"AMT_CREDIT_SUM_LIMIT":["sum","mean","max"],
'AMT_CREDIT_SUM_OVERDUE':["sum","mean","max"],
'DAYS_CREDIT_UPDATE':["max","min"],
'AMT_ANNUITY':["sum","mean"],
'MONTHS_BALANCE_COUNT':["sum"],
'STATUS_0_SUM':["sum"],
'STATUS_0_MEAN':["mean"],
'STATUS_C_SUM':["sum"],
'STATUS_C_MEAN':["mean"],
'CREDIT_ACTIVE_Active':["sum","mean"],
'CREDIT_ACTIVE_Closed':["sum","mean"],
'CREDIT_TYPE_Rare':["sum","mean"],
'CREDIT_TYPE_Consumer credit':["sum","mean"],
'CREDIT_TYPE_Credit card':["sum","mean"],
"NEW_MONTHS_CREDIT":["count","sum","mean","max","min"]}
bureau_and_bb_agg = bureau_and_bb.groupby("SK_ID_CURR" ).agg(agg_list ).reset_index()
bureau_and_bb_agg.columns = pd.Index(["BB_" + col[0] + "_" + col[1].upper() for col in bureau_and_bb_agg.columns.tolist() ])
bureau_and_bb_agg["BB_NEW_AMT_CREDIT_SUM_RANGE"] = bureau_and_bb_agg["BB_AMT_CREDIT_SUM_MAX"] - bureau_and_bb_agg["BB_AMT_CREDIT_SUM_MIN"]
bureau_and_bb_agg["BB_NEW_DAYS_CREDIT_RANGE"]= round(( bureau_and_bb_agg["BB_DAYS_CREDIT_MAX"] - bureau_and_bb_agg["BB_DAYS_CREDIT_MIN"])/(30 * bureau_and_bb_agg["BB_SK_ID_CURR_COUNT"]))
agg_list = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
'DAYS_CREDIT_UPDATE': ['mean'],
'CREDIT_DAY_OVERDUE': ['max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'AMT_ANNUITY': ['max', 'mean'],
'CNT_CREDIT_PROLONG': ['sum']
}
active = bureau_and_bb[bureau_and_bb['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR' ).agg(agg_list)
active_agg.columns = pd.Index(['BB_NEW_ACTIVE_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist() ])
bureau_and_bb_agg.rename(columns = {'BB_SK_ID_CURR_': 'SK_ID_CURR'}, inplace = True)
bureau_and_bb_agg = bureau_and_bb_agg.join(active_agg, how='left', on='SK_ID_CURR')
closed = bureau_and_bb[bureau_and_bb['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR' ).agg(agg_list)
closed_agg.columns = pd.Index(['BB_NEW_CLOSED_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist() ])
bureau_and_bb_agg = bureau_and_bb_agg.join(closed_agg, how='left', on='SK_ID_CURR')
return bureau_and_bb_agg | Home Credit Default Risk |
9,067,638 | y_pred = model.predict(X_test)
def installments_payments():
    """Aggregate the installments_payments table to one row per previous
    loan (SK_ID_PREV) and build the aggregation spec used later at the
    client level.

    Returns a (agg_spec, aggregated_frame) pair where agg_spec maps every
    produced column to ['mean', 'min', 'max', 'sum'].
    """
    payments = pd.read_csv('.. /input/home-credit-default-risk/installments_payments.csv')
    # Positive gap -> the instalment was paid before its due date.
    payments['NEW_DAYS_PAID_EARLIER'] = payments['DAYS_INSTALMENT'] - payments['DAYS_ENTRY_PAYMENT']
    # Flag instalments that were paid late (negative "paid earlier" gap).
    payments['NEW_NUM_PAID_LATER'] = payments['NEW_DAYS_PAID_EARLIER'].map(lambda gap: 1 if gap < 0 else 0)
    per_prev_spec = {'NUM_INSTALMENT_VERSION': ['nunique'],
                     'NUM_INSTALMENT_NUMBER': 'max',
                     'DAYS_INSTALMENT': ['min', 'max'],
                     'DAYS_ENTRY_PAYMENT': ['min', 'max'],
                     'AMT_INSTALMENT': ['min', 'max', 'sum', 'mean'],
                     'AMT_PAYMENT': ['min', 'max', 'sum', 'mean'],
                     'NEW_DAYS_PAID_EARLIER': 'mean',
                     'NEW_NUM_PAID_LATER': 'sum'}
    ins_agg = payments.groupby('SK_ID_PREV').agg(per_prev_spec)
    ins_agg.columns = pd.Index(["INS_" + name + '_' + stat.upper() for name, stat in ins_agg.columns.tolist()])
    # The raw day columns were only needed to derive the features above.
    ins_agg.drop(['INS_DAYS_INSTALMENT_MIN',
                  'INS_DAYS_INSTALMENT_MAX',
                  'INS_DAYS_ENTRY_PAYMENT_MIN',
                  'INS_DAYS_ENTRY_PAYMENT_MAX'], axis=1, inplace=True)
    # Share of the owed amount actually paid, and the absolute shortfall.
    ins_agg['INS_NEW_PAYMENT_PERC'] = ins_agg['INS_AMT_PAYMENT_SUM'] / ins_agg['INS_AMT_INSTALMENT_SUM']
    ins_agg['INS_NEW_PAYMENT_DIFF'] = ins_agg['INS_AMT_INSTALMENT_SUM'] - ins_agg['INS_AMT_PAYMENT_SUM']
    # Spec consumed later when these columns are re-aggregated per client.
    agg_list_previous_application = {col: ['mean', "min", "max", "sum"] for col in ins_agg.columns}
    ins_agg.reset_index(inplace=True)
    return agg_list_previous_application, ins_agg
9,067,638 | submission = pd.DataFrame({'id': range(test_data.shape[0]),
'target':(y_pred >= 0.5 ).astype('int' ).flatten()
def pos_cash_balance(agg_list_previous_application):
    """Aggregate POS_CASH_balance to one row per previous loan (SK_ID_PREV).

    The supplied aggregation spec is extended in place with every produced
    column mapped to ['mean', 'min', 'max', 'sum'].  Returns the
    (extended spec, aggregated frame) pair.
    """
    pos = pd.read_csv('.. /input/home-credit-default-risk/POS_CASH_balance.csv')
    pos = pd.get_dummies(pos, columns=['NAME_CONTRACT_STATUS'], dummy_na=True)
    per_prev_spec = {'MONTHS_BALANCE': ['min', 'max'],
                     'CNT_INSTALMENT': ['min', 'max'],
                     'CNT_INSTALMENT_FUTURE': ['min', 'max'],
                     'SK_DPD': ['max', 'mean'],
                     'SK_DPD_DEF': ['max', 'mean'],
                     'NAME_CONTRACT_STATUS_Active': 'sum',
                     'NAME_CONTRACT_STATUS_Amortized debt': 'sum',
                     'NAME_CONTRACT_STATUS_Approved': 'sum',
                     'NAME_CONTRACT_STATUS_Canceled': 'sum',
                     'NAME_CONTRACT_STATUS_Completed': 'sum',
                     'NAME_CONTRACT_STATUS_Demand': 'sum',
                     'NAME_CONTRACT_STATUS_Returned to the store': 'sum',
                     'NAME_CONTRACT_STATUS_Signed': 'sum',
                     'NAME_CONTRACT_STATUS_XNA': 'sum',
                     'NAME_CONTRACT_STATUS_nan': 'sum'
                     }
    pos_agg = pos.groupby('SK_ID_PREV').agg(per_prev_spec)
    pos_agg.columns = pd.Index(["POS_" + name + '_' + stat.upper() for name, stat in pos_agg.columns.tolist()])
    # A loan whose remaining-instalment count reached zero but was never
    # marked "Completed" did not close on time.
    not_on_time = (pos_agg['POS_CNT_INSTALMENT_FUTURE_MIN'] == 0) & (pos_agg['POS_NAME_CONTRACT_STATUS_Completed_SUM'] == 0)
    pos_agg['POS_NEW_IS_CREDIT_NOT_COMPLETED_ON_TIME'] = not_on_time.astype(int)
    # Rare status counters are dropped after the flag above is derived.
    pos_agg.drop(['POS_NAME_CONTRACT_STATUS_Approved_SUM',
                  'POS_NAME_CONTRACT_STATUS_Amortized debt_SUM',
                  'POS_NAME_CONTRACT_STATUS_Canceled_SUM',
                  'POS_NAME_CONTRACT_STATUS_Returned to the store_SUM',
                  'POS_NAME_CONTRACT_STATUS_Signed_SUM',
                  'POS_NAME_CONTRACT_STATUS_XNA_SUM',
                  'POS_NAME_CONTRACT_STATUS_nan_SUM'], axis=1, inplace=True)
    for col in pos_agg.columns:
        agg_list_previous_application[col] = ['mean', "min", "max", "sum"]
    pos_agg.reset_index(inplace=True)
    return agg_list_previous_application, pos_agg
9,067,638 | submission.to_csv('submission.csv', index=False )<save_to_csv> | def credit_card_balance() :
    """Build per-client (SK_ID_CURR) features from credit_card_balance.csv.

    Derives loan/instalment counts, days-past-due counts, the rate of
    missed minimum payments and ATM/total drawing ratios, then aggregates
    every retained column per client with the spec at the bottom.
    """
    CCB = pd.read_csv('.. /input/home-credit-default-risk/credit_card_balance.csv')
    CCB = pd.get_dummies(CCB, columns= ['NAME_CONTRACT_STATUS'])
    # Rare contract-status dummies are dropped outright.
    dropthis = ['NAME_CONTRACT_STATUS_Approved', 'NAME_CONTRACT_STATUS_Demand',
           'NAME_CONTRACT_STATUS_Refused', 'NAME_CONTRACT_STATUS_Sent proposal',
           'NAME_CONTRACT_STATUS_Signed' ]
    CCB = CCB.drop(dropthis, axis=1)
    # Number of distinct previous card loans per client.
    grp = CCB.groupby(by = ['SK_ID_CURR'])['SK_ID_PREV'].nunique().reset_index().rename(index = str, columns = {'SK_ID_PREV': 'NUMBER_OF_LOANS_PER_CUSTOMER'})
    CCB = CCB.merge(grp, on = ['SK_ID_CURR'], how = 'left')
    # Instalments per loan: max cumulative mature-instalment count, summed
    # over the client's loans, divided by the loan count.
    grp = CCB.groupby(by = ['SK_ID_CURR', 'SK_ID_PREV'])['CNT_INSTALMENT_MATURE_CUM'].max().reset_index().rename(index = str, columns = {'CNT_INSTALMENT_MATURE_CUM': 'NUMBER_OF_INSTALMENTS'})
    grp1 = grp.groupby(by = ['SK_ID_CURR'])['NUMBER_OF_INSTALMENTS'].sum().reset_index().rename(index = str, columns = {'NUMBER_OF_INSTALMENTS': 'TOTAL_INSTALMENTS_OF_ALL_LOANS'})
    CCB = CCB.merge(grp1, on = ['SK_ID_CURR'], how = 'left')
    CCB['INSTALLMENTS_PER_LOAN'] =(CCB['TOTAL_INSTALMENTS_OF_ALL_LOANS']/CCB['NUMBER_OF_LOANS_PER_CUSTOMER'] ).astype('uint32')
    def geciken_gun_hesapla(DPD):
        # Turkish: "count overdue days" -- number of monthly records with a
        # non-zero SK_DPD for one (client, loan) pair.
        x = DPD.tolist()
        c = 0
        for i,j in enumerate(x):
            if j != 0:
                c += 1
        return c
    grp = CCB.groupby(by = ['SK_ID_CURR', 'SK_ID_PREV'] ).apply(lambda x: geciken_gun_hesapla(x.SK_DPD)).reset_index().rename(index = str, columns = {0: 'NUMBER_OF_DPD'})
    grp1 = grp.groupby(by = ['SK_ID_CURR'])['NUMBER_OF_DPD'].mean().reset_index().rename(index = str, columns = {'NUMBER_OF_DPD' : 'DPD_COUNT'})
    CCB = CCB.merge(grp1, on = ['SK_ID_CURR'], how = 'left')
    def f(min_pay, total_pay):
        # Percentage of months in which the actual payment fell below the
        # required minimum payment.
        M = min_pay.tolist()
        T = total_pay.tolist()
        P = len(M)
        c = 0
        for i in range(len(M)) :
            if T[i] < M[i]:
                c += 1
        return(100*c)/P
    grp = CCB.groupby(by = ['SK_ID_CURR'] ).apply(lambda x: f(x.AMT_INST_MIN_REGULARITY, x.AMT_PAYMENT_CURRENT)).reset_index().rename(index = str, columns = { 0 : 'PERCENTAGE_MIN_MISSED_PAYMENTS'})
    CCB = CCB.merge(grp, on = ['SK_ID_CURR'], how = 'left')
    # ATM drawings as a share of all drawings per client.
    grp = CCB.groupby(by = ['SK_ID_CURR'])['AMT_DRAWINGS_ATM_CURRENT'].sum().reset_index().rename(index = str, columns = {'AMT_DRAWINGS_ATM_CURRENT' : 'DRAWINGS_ATM'})
    CCB = CCB.merge(grp, on = ['SK_ID_CURR'], how = 'left')
    grp = CCB.groupby(by = ['SK_ID_CURR'])['AMT_DRAWINGS_CURRENT'].sum().reset_index().rename(index = str, columns = {'AMT_DRAWINGS_CURRENT' : 'DRAWINGS_TOTAL'})
    CCB = CCB.merge(grp, on = ['SK_ID_CURR'], how = 'left')
    # NOTE(review): where DRAWINGS_TOTAL is 0 this division yields inf (or
    # NaN for 0/0) -- confirm downstream handling of those values.
    CCB['CASH_CARD_RATIO1'] =(CCB['DRAWINGS_ATM']/CCB['DRAWINGS_TOTAL'])*100
    del CCB['DRAWINGS_ATM']
    del CCB['DRAWINGS_TOTAL']
    grp = CCB.groupby(by = ['SK_ID_CURR'])['CASH_CARD_RATIO1'].mean().reset_index().rename(index = str, columns ={ 'CASH_CARD_RATIO1' : 'CASH_CARD_RATIO'})
    CCB = CCB.merge(grp, on = ['SK_ID_CURR'], how = 'left')
    # Average drawing amount: total drawn / number of drawings (x100).
    grp = CCB.groupby(by = ['SK_ID_CURR'])['AMT_DRAWINGS_CURRENT'].sum().reset_index().rename(index = str, columns = {'AMT_DRAWINGS_CURRENT' : 'TOTAL_DRAWINGS'})
    CCB = CCB.merge(grp, on = ['SK_ID_CURR'], how = 'left')
    grp = CCB.groupby(by = ['SK_ID_CURR'])['CNT_DRAWINGS_CURRENT'].sum().reset_index().rename(index = str, columns = {'CNT_DRAWINGS_CURRENT' : 'NUMBER_OF_DRAWINGS'})
    CCB = CCB.merge(grp, on = ['SK_ID_CURR'], how = 'left')
    CCB['DRAWINGS_RATIO1'] =(CCB['TOTAL_DRAWINGS']/CCB['NUMBER_OF_DRAWINGS'])*100
    del CCB['TOTAL_DRAWINGS']
    del CCB['NUMBER_OF_DRAWINGS']
    grp = CCB.groupby(by = ['SK_ID_CURR'])['DRAWINGS_RATIO1'].mean().reset_index().rename(index = str, columns ={ 'DRAWINGS_RATIO1' : 'DRAWINGS_RATIO'})
    CCB = CCB.merge(grp, on = ['SK_ID_CURR'], how = 'left')
    del CCB['DRAWINGS_RATIO1']
    # NOTE(review): the right-hand Series is indexed by SK_ID_CURR while CCB
    # has a positional index, so this alignment likely does not attach a
    # per-row count; CC_COUNT is also absent from the aggregation spec
    # below -- verify intent.
    CCB['CC_COUNT'] = CCB.groupby('SK_ID_CURR' ).size()
    CCB_agg = CCB.groupby('SK_ID_CURR' ).agg({
        'MONTHS_BALANCE':["sum","mean"],
        'AMT_BALANCE':["sum","mean","min","max"],
        'AMT_CREDIT_LIMIT_ACTUAL':["sum","mean"],
        'AMT_DRAWINGS_ATM_CURRENT':["sum","mean","min","max"],
        'AMT_DRAWINGS_CURRENT':["sum","mean","min","max"],
        'AMT_DRAWINGS_OTHER_CURRENT':["sum","mean","min","max"],
        'AMT_DRAWINGS_POS_CURRENT':["sum","mean","min","max"],
        'AMT_INST_MIN_REGULARITY':["sum","mean","min","max"],
        'AMT_PAYMENT_CURRENT':["sum","mean","min","max"],
        'AMT_PAYMENT_TOTAL_CURRENT':["sum","mean","min","max"],
        'AMT_RECEIVABLE_PRINCIPAL':["sum","mean","min","max"],
        'AMT_RECIVABLE':["sum","mean","min","max"],
        'AMT_TOTAL_RECEIVABLE':["sum","mean","min","max"],
        'CNT_DRAWINGS_ATM_CURRENT':["sum","mean"],
        'CNT_DRAWINGS_CURRENT':["sum","mean","max"],
        'CNT_DRAWINGS_OTHER_CURRENT':["mean","max"],
        'CNT_DRAWINGS_POS_CURRENT':["sum","mean","max"],
        'CNT_INSTALMENT_MATURE_CUM':["sum","mean","max","min"],
        'SK_DPD':["sum","mean","max"],
        'SK_DPD_DEF':["sum","mean","max"],
        'NAME_CONTRACT_STATUS_Active':["sum","mean","min","max"],
        'INSTALLMENTS_PER_LOAN':["sum","mean","min","max"],
        'NUMBER_OF_LOANS_PER_CUSTOMER':["mean"],
        'DPD_COUNT':["mean"],
        'PERCENTAGE_MIN_MISSED_PAYMENTS':["mean"],
        'CASH_CARD_RATIO':["mean"],
        'DRAWINGS_RATIO':["mean"]})
    CCB_agg.columns = pd.Index(['CCB_' + e[0] + "_" + e[1].upper() for e in CCB_agg.columns.tolist() ])
    CCB_agg.reset_index(inplace = True)
    return CCB_agg | Home Credit Default Risk
def previous_application(agg_list_previous_application):
    """Load previous_application.csv, engineer features and one-hot encode.

    Rare category levels are bucketed, time columns are binned, and a
    client-level aggregation spec is assembled from the resulting dummy
    columns plus the SK_ID_PREV-level spec passed in.

    Returns (prev_agg_list, df_prev): the merged aggregation spec and the
    engineered previous-application frame (still one row per application).
    """
    df_prev = pd.read_csv('.. /input/home-credit-default-risk/previous_application.csv')
    # Collapse weekdays into two buckets: WEEK_DAY vs WEEKEND.
    df_prev["WEEKDAY_APPR_PROCESS_START"] = df_prev["WEEKDAY_APPR_PROCESS_START"].replace(['MONDAY','TUESDAY', 'WEDNESDAY','THURSDAY','FRIDAY'], 'WEEK_DAY')
    df_prev["WEEKDAY_APPR_PROCESS_START"] = df_prev["WEEKDAY_APPR_PROCESS_START"].replace(['SATURDAY', 'SUNDAY'], 'WEEKEND')
    # Collapse the start hour into working (8-17) vs off hours.
    a = [8,9,10,11,12,13,14,15,16,17]
    df_prev["HOUR_APPR_PROCESS_START"] = df_prev["HOUR_APPR_PROCESS_START"].replace(a, 'working_hours')
    b = [18,19,20,21,22,23,0,1,2,3,4,5,6,7]
    df_prev["HOUR_APPR_PROCESS_START"] = df_prev["HOUR_APPR_PROCESS_START"].replace(b, 'off_hours')
    # 1 if the decision happened within the last ~12 months (360 days).
    df_prev["DAYS_DECISION"] = [1 if abs(i/(12*30)) <=1 else 0 for i in df_prev.DAYS_DECISION]
    # Binarise the accompaniment column: alone vs not_alone.
    df_prev["NAME_TYPE_SUITE"] = df_prev["NAME_TYPE_SUITE"].replace('Unaccompanied', 'alone')
    b = ['Family', 'Spouse, partner', 'Children', 'Other_B', 'Other_A', 'Group of people']
    df_prev["NAME_TYPE_SUITE"] = df_prev["NAME_TYPE_SUITE"].replace(b, 'not_alone')
    # Bucket rare goods categories / seller industries into 'others'.
    a = ['Auto Accessories', 'Jewelry', 'Homewares', 'Medical Supplies', 'Vehicles', 'Sport and Leisure',
         'Gardening', 'Other', 'Office Appliances', 'Tourism', 'Medicine', 'Direct Sales', 'Fitness', 'Additional Service',
         'Education', 'Weapon', 'Insurance', 'House Construction', 'Animals']
    df_prev["NAME_GOODS_CATEGORY"] = df_prev["NAME_GOODS_CATEGORY"].replace(a, 'others')
    a = ['Auto technology', 'Jewelry', 'MLM partners', 'Tourism']
    df_prev["NAME_SELLER_INDUSTRY"] = df_prev["NAME_SELLER_INDUSTRY"].replace(a, 'others')
    # LOAN_RATE duplicates NEW_LOAN_RATE; it is kept for compatibility but
    # only NEW_LOAN_RATE appears in the aggregation spec below.
    df_prev["LOAN_RATE"] = df_prev.AMT_APPLICATION/df_prev.AMT_CREDIT
    df_prev["NEW_LOAN_RATE"] = df_prev.AMT_APPLICATION/df_prev.AMT_CREDIT
    # 1 = last due date moved forward (or unchanged), 0 = moved back; the
    # "NaN" string keeps missing values as their own dummy level.
    k = df_prev.DAYS_LAST_DUE_1ST_VERSION - df_prev.DAYS_LAST_DUE
    df_prev["NEW_CHURN_PREV"] = [1 if i >= 0 else(0 if i < 0 else "NaN")for i in k]
    # Insurance proxy: credit granted above the goods price suggests the
    # difference financed an insurance premium.
    df_prev['sigorta_miktari'] = df_prev['AMT_CREDIT'] - df_prev['AMT_GOODS_PRICE']
    df_prev["NEW_INSURANCE"] = df_prev['sigorta_miktari'].apply(lambda x: 1 if x > 0 else(0 if x <= 0 else np.nan))
    df_prev.drop('sigorta_miktari', axis=1, inplace=True)
    # BUG FIX: the original used chained indexing
    # (df_prev[mask]['NEW_INSURANCE'] = np.nan), which assigns to a
    # temporary copy and silently does nothing.  Use .loc so rows with a
    # zero credit or goods price really get a missing insurance flag.
    df_prev.loc[(df_prev['AMT_CREDIT'] == 0) | (df_prev['AMT_GOODS_PRICE'] == 0), 'NEW_INSURANCE'] = np.nan
    drop_list = ['AMT_DOWN_PAYMENT', 'SELLERPLACE_AREA', 'CNT_PAYMENT', 'PRODUCT_COMBINATION', 'DAYS_FIRST_DRAWING', 'DAYS_FIRST_DUE',
                 'DAYS_LAST_DUE_1ST_VERSION', 'DAYS_LAST_DUE','DAYS_TERMINATION','NFLAG_INSURED_ON_APPROVAL']
    df_prev.drop(drop_list, axis = 1, inplace = True)
    # One-hot encode every remaining object-dtype column.
    category_columns=[]
    for i in df_prev.columns:
        if df_prev[i].dtypes == "O":
            category_columns.append(i)
    df_prev = pd.get_dummies(df_prev, columns = category_columns)
    prev_agg_list = {"SK_ID_CURR":["count"],
                     "AMT_ANNUITY":["max"],
                     "AMT_APPLICATION":["min","mean","max"],
                     "AMT_CREDIT":["max"],
                     "AMT_GOODS_PRICE":["sum", "mean"],
                     "NFLAG_LAST_APPL_IN_DAY":["sum","mean"],
                     "RATE_DOWN_PAYMENT":["sum", "mean"],
                     "RATE_INTEREST_PRIMARY":["sum", "mean"],
                     "RATE_INTEREST_PRIVILEGED":["sum", "mean"],
                     "DAYS_DECISION":["sum"],
                     "NEW_LOAN_RATE":["sum", "mean", "min", "max"],
                     "NEW_INSURANCE":["sum", "mean"],
                     "NAME_CONTRACT_TYPE_Cash loans":["sum", "mean"],
                     "NAME_CONTRACT_TYPE_Consumer loans":["sum", "mean"],
                     "NAME_CONTRACT_TYPE_Revolving loans":["sum", "mean"],
                     "NAME_CONTRACT_TYPE_XNA":["sum", "mean"],
                     "WEEKDAY_APPR_PROCESS_START_WEEKEND":["sum", "mean"],
                     "WEEKDAY_APPR_PROCESS_START_WEEK_DAY":["sum", "mean"],
                     "HOUR_APPR_PROCESS_START_off_hours":["sum", "mean"],
                     "HOUR_APPR_PROCESS_START_working_hours":["sum", "mean"],
                     "FLAG_LAST_APPL_PER_CONTRACT_N":["sum", "mean"],
                     "FLAG_LAST_APPL_PER_CONTRACT_Y":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Building a house or an annex":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Business development":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Buying a garage":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Buying a holiday home / land":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Buying a home":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Buying a new car":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Buying a used car":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Car repairs":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Education":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Everyday expenses":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Furniture":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Gasification / water supply":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Hobby":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Journey":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Medicine":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Money for a third person":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Other":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Payments on other loans":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Purchase of electronic equipment":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Refusal to name the goal":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Repairs":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Urgent needs":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_Wedding / gift / holiday":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_XAP":["sum", "mean"],
                     "NAME_CASH_LOAN_PURPOSE_XNA":["sum", "mean"],
                     "NAME_CONTRACT_STATUS_Approved":["sum", "mean"],
                     "NAME_CONTRACT_STATUS_Canceled":["sum", "mean"],
                     "NAME_CONTRACT_STATUS_Refused":["sum", "mean"],
                     "NAME_CONTRACT_STATUS_Unused offer":["sum", "mean"],
                     "NAME_PAYMENT_TYPE_Cash through the bank":["sum", "mean"],
                     "NAME_PAYMENT_TYPE_Cashless from the account of the employer":["sum", "mean"],
                     "NAME_PAYMENT_TYPE_Non-cash from your account":["sum", "mean"],
                     "NAME_PAYMENT_TYPE_XNA":["sum", "mean"],
                     "CODE_REJECT_REASON_CLIENT":["sum", "mean"],
                     "CODE_REJECT_REASON_HC":["sum", "mean"],
                     "CODE_REJECT_REASON_LIMIT":["sum", "mean"],
                     "CODE_REJECT_REASON_SCO":["sum", "mean"],
                     "CODE_REJECT_REASON_SCOFR":["sum", "mean"],
                     "CODE_REJECT_REASON_SYSTEM":["sum", "mean"],
                     "CODE_REJECT_REASON_VERIF":["sum", "mean"],
                     "CODE_REJECT_REASON_XAP":["sum", "mean"],
                     "CODE_REJECT_REASON_XNA":["sum", "mean"],
                     "NAME_TYPE_SUITE_alone":["sum", "mean"],
                     "NAME_TYPE_SUITE_not_alone":["sum", "mean"],
                     "NAME_CLIENT_TYPE_New":["sum", "mean"],
                     "NAME_CLIENT_TYPE_Refreshed":["sum", "mean"],
                     "NAME_CLIENT_TYPE_Repeater":["sum", "mean"],
                     "NAME_CLIENT_TYPE_XNA":["sum", "mean"],
                     "NAME_GOODS_CATEGORY_Audio/Video":["sum", "mean"],
                     "NAME_GOODS_CATEGORY_Clothing and Accessories":["sum", "mean"],
                     "NAME_GOODS_CATEGORY_Computers":["sum", "mean"],
                     "NAME_GOODS_CATEGORY_Construction Materials":["sum", "mean"],
                     "NAME_GOODS_CATEGORY_Consumer Electronics":["sum", "mean"],
                     "NAME_GOODS_CATEGORY_Furniture":["sum", "mean"],
                     "NAME_GOODS_CATEGORY_Mobile":["sum", "mean"],
                     "NAME_GOODS_CATEGORY_Photo / Cinema Equipment":["sum", "mean"],
                     "NAME_GOODS_CATEGORY_XNA":["sum", "mean"],
                     "NAME_GOODS_CATEGORY_others":["sum", "mean"],
                     "NAME_PORTFOLIO_Cards":["sum", "mean"],
                     "NAME_PORTFOLIO_Cars":["sum", "mean"],
                     "NAME_PORTFOLIO_Cash":["sum", "mean"],
                     "NAME_PORTFOLIO_POS":["sum", "mean"],
                     "NAME_PORTFOLIO_XNA":["sum", "mean"],
                     "NAME_PRODUCT_TYPE_XNA":["sum", "mean"],
                     "NAME_PRODUCT_TYPE_walk-in":["sum", "mean"],
                     "NAME_PRODUCT_TYPE_x-sell":["sum", "mean"],
                     "CHANNEL_TYPE_AP+(Cash loan)":["sum", "mean"],
                     "CHANNEL_TYPE_Car dealer":["sum", "mean"],
                     "CHANNEL_TYPE_Channel of corporate sales":["sum", "mean"],
                     "CHANNEL_TYPE_Contact center":["sum", "mean"],
                     "CHANNEL_TYPE_Country-wide":["sum", "mean"],
                     "CHANNEL_TYPE_Credit and cash offices":["sum", "mean"],
                     "CHANNEL_TYPE_Regional / Local":["sum", "mean"],
                     "CHANNEL_TYPE_Stone":["sum", "mean"],
                     "NAME_SELLER_INDUSTRY_Clothing":["sum", "mean"],
                     "NAME_SELLER_INDUSTRY_Connectivity":["sum", "mean"],
                     "NAME_SELLER_INDUSTRY_Construction":["sum", "mean"],
                     "NAME_SELLER_INDUSTRY_Consumer electronics":["sum", "mean"],
                     "NAME_SELLER_INDUSTRY_Furniture":["sum", "mean"],
                     "NAME_SELLER_INDUSTRY_Industry":["sum", "mean"],
                     "NAME_SELLER_INDUSTRY_XNA":["sum", "mean"],
                     "NAME_SELLER_INDUSTRY_others":["sum", "mean"],
                     "NAME_YIELD_GROUP_XNA":["sum", "mean"],
                     "NAME_YIELD_GROUP_high":["sum", "mean"],
                     "NAME_YIELD_GROUP_low_action":["sum", "mean"],
                     "NAME_YIELD_GROUP_low_normal":["sum", "mean"],
                     "NAME_YIELD_GROUP_middle":["sum", "mean"],
                     "NEW_CHURN_PREV_0":["sum", "mean"],
                     "NEW_CHURN_PREV_1":["sum", "mean"],
                     "NEW_CHURN_PREV_NaN":["sum", "mean"]}
    # Fold in the SK_ID_PREV-level spec built by installments/pos-cash.
    prev_agg_list.update(agg_list_previous_application)
    return prev_agg_list, df_prev
9,067,638 | %matplotlib inline
<train_model> | def pre_processing_and_combine() :
    """Run every table-specific feature builder and left-join the results
    into one modelling frame keyed by SK_ID_CURR."""
    with timer("Process application train"):
        df = application_train()
        print("application train & test shape:", df.shape)
    with timer("Bureau and Bureau Balance"):
        bureau_and_bb_agg = bureau_bb()
        print("Bureau and Bureau Balance:", bureau_and_bb_agg.shape)
    with timer("Installment Payments"):
        agg_list_previous_application, ins_agg = installments_payments()
        print("Installment Payments:", ins_agg.shape)
    with timer("Pos Cash Balance"):
        # Extends the same spec dict started by installments_payments().
        agg_list_previous_application, pos_agg = pos_cash_balance(agg_list_previous_application)
        print("Pos Cash Balance:", pos_agg.shape)
    with timer("Credit Card Balance"):
        CCB_agg = credit_card_balance()
        print("Credit Card Balance:", CCB_agg.shape)
    with timer("previous_application"):
        prev_agg_list, df_prev = previous_application(agg_list_previous_application)
        print("previous_application:", df_prev.shape)
    with timer("All tables are combining"):
        # SK_ID_PREV-level tables are joined to previous_application first,
        # aggregated up to SK_ID_CURR, then joined to the application frame.
        df_prev_ins = df_prev.merge(ins_agg, how = 'left', on = 'SK_ID_PREV')
        df_prev_ins_pos = df_prev_ins.merge(pos_agg, how = 'left', on = 'SK_ID_PREV')
        df_prev_ins_pos_agg = df_prev_ins_pos.groupby("SK_ID_CURR" ).agg(prev_agg_list ).reset_index()
        df_prev_ins_pos_agg.columns = pd.Index(["PREV_" + col[0] + "_" + col[1].upper() for col in df_prev_ins_pos_agg.columns.tolist() ])
        # Restore the join key's name after the prefix/suffix renaming.
        df_prev_ins_pos_agg.rename(columns={"PREV_SK_ID_CURR_":"SK_ID_CURR"}, inplace = True)
        df_prev_others = df.merge(df_prev_ins_pos_agg, how = 'left',on = 'SK_ID_CURR')
        df_prev_ins_pos_ccb = df_prev_others.merge(CCB_agg, how = 'left',on = 'SK_ID_CURR')
        all_data = df_prev_ins_pos_ccb.merge(bureau_and_bb_agg, how = 'left',on = 'SK_ID_CURR')
        print("all_data process:", all_data.shape)
        return all_data
| Home Credit Default Risk |
9,067,638 | train_image_folder = ".. /input/train-images/image/"
train_label_folder = ".. /input/train-labels/label/"
test_image_folder = ".. /input/test-images/image/"
train_list = os.listdir(train_image_folder)
if 'hmvsa0loxh3ek2y8rzmcyb6zrrh9mwyp' in train_list:
train_list.remove('hmvsa0loxh3ek2y8rzmcyb6zrrh9mwyp')
print('Train data:', len(train_list))<load_pretrained> | Home Credit Default Risk | |
9,067,638 | def load_dicom_volume(src_dir, suffix='*.dcm'):
encode_name = src_dir.split('/')[-1]
dicom_scans = [dicom.read_file(sp)\
for sp in glob.glob(os.path.join(src_dir, suffix)) ]
dicom_scans.sort(key=lambda s: float(s[(0x0020, 0x0032)][2]))
volume_image = np.stack([ds.pixel_array \
for ds in dicom_scans] ).astype(np.int16)
return encode_name, volume_image
def load_label(label_fpath, transpose=False):
encode_name = label_fpath[-39: -7]
label_data = nib.load(label_fpath)
label_array = label_data.get_fdata()
if transpose:
label_array = np.transpose(label_array, axes=(2, 1, 0))
return encode_name, label_array<load_pretrained> | def modeling(all_data):
    """Train a 10-fold LightGBM model and write submission_lightgbm.csv.

    Rows with a non-null TARGET are the training set; null-TARGET rows are
    the test set.  Returns the per-fold feature-importance frame.
    """
    # LightGBM is strict about special characters in feature names, so
    # replace every non-alphanumeric character with '_'.
    all_data.columns = ["".join(c if c.isalnum() else "_" for c in str(x)) for x in all_data.columns]
    train_df = all_data[all_data['TARGET'].notnull() ]
    test_df = all_data[all_data['TARGET'].isnull() ]
    folds = KFold(n_splits = 10, shuffle = True, random_state = 1001)
    # Out-of-fold predictions for the train set; fold-averaged predictions
    # for the test set.
    oof_preds = np.zeros(train_df.shape[0])
    sub_preds = np.zeros(test_df.shape[0])
    feature_importance_df = pd.DataFrame()
    feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR']]
    for n_fold,(train_idx, valid_idx)in enumerate(folds.split(train_df[feats], train_df['TARGET'])) :
        train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
        valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
        clf = LGBMClassifier(
            n_jobs = -1,
            n_estimators=10000,
            learning_rate=0.02,
            num_leaves=34,
            colsample_bytree=0.9497036,
            subsample=0.8715623,
            max_depth=8,
            reg_alpha=0.041545473,
            reg_lambda=0.0735294,
            min_split_gain=0.0222415,
            min_child_weight=39.3259775,
            silent=-1,
            verbose=-1,)
        # Early stopping on the validation fold; best_iteration_ is reused
        # for both OOF and test predictions.
        clf.fit(train_x, train_y, eval_set = [(train_x, train_y),(valid_x, valid_y)],
                eval_metric = 'auc', verbose = 200, early_stopping_rounds = 200)
        oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]
        sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
        fold_importance_df = pd.DataFrame()
        fold_importance_df["feature"] = feats
        fold_importance_df["importance"] = clf.feature_importances_
        fold_importance_df["fold"] = n_fold + 1
        feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
        print('Fold %2d AUC : %.6f' %(n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
    print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))
    # NOTE(review): test_df is a slice of all_data, so this assignment can
    # raise SettingWithCopyWarning -- consider test_df = test_df.copy().
    test_df['TARGET'] = sub_preds
    test_df[['SK_ID_CURR', 'TARGET']].to_csv("submission_lightgbm.csv", index= False)
    # display_importances is defined elsewhere in this file.
    display_importances(feature_importance_df)
    return feature_importance_df
| Home Credit Default Risk |
9,067,638 | for encode in tqdm.tqdm(train_list):
_, volume_image = load_dicom_volume(os.path.join(train_image_folder, encode))
npz_folder = os.path.join(train_image_npz_folder, encode)
if not os.path.exists(npz_folder):
os.mkdir(npz_folder)
num_slice = volume_image.shape[0]
for _z in range(0, num_slice):
npz_path = os.path.join(npz_folder, "%03d.npz"%(_z))
np.savez_compressed(npz_path, image=volume_image[_z])
del volume_image<load_pretrained> | def main() :
    """Entry point: build the full feature matrix, then train and score."""
    with timer("Preprocessing Time"):
        all_data = pre_processing_and_combine()
    with timer("Modeling"):
        feat_importance = modeling(all_data)
| Home Credit Default Risk |
9,067,638 | <set_options><EOS> | if __name__ == "__main__":
    # Guarded entry point: run the whole pipeline and report total wall time.
    with timer("Full model run"):
        main()
5,966,704 | <prepare_output><EOS> | warnings.simplefilter(action='ignore', category=FutureWarning)
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
@contextmanager
def timer(title):
    """Context manager that prints '<title> - done in Ns' on normal exit."""
    started = time.time()
    yield
    elapsed = time.time() - started
    print("{} - done in {:.0f}s".format(title, elapsed))
def one_hot_encoder(df, nan_as_category = True):
    """One-hot encode every object-dtype column of df.

    Returns the encoded frame together with the list of newly created
    dummy-column names.  When nan_as_category is True, missing values get
    their own indicator column.
    """
    before = list(df.columns)
    object_cols = [c for c in df.columns if df[c].dtype == 'object']
    encoded = pd.get_dummies(df, columns=object_cols, dummy_na=nan_as_category)
    added = [c for c in encoded.columns if c not in before]
    return encoded, added
def application_train_test(num_rows = None, nan_as_category = False):
    """Load application_train/application_test and merge them into one frame.

    Test rows are identifiable afterwards by TARGET being NaN.  Adds simple
    engineered ratios and one-hot encodes the categoricals.

    num_rows: optional row cap for quick experiments.
    nan_as_category: forwarded to one_hot_encoder.
    """
    df = pd.read_csv('/kaggle/input/home-credit-default-risk/application_train.csv', nrows= num_rows)
    test_df = pd.read_csv('/kaggle/input/home-credit-default-risk/application_test.csv', nrows= num_rows)
    print("Train samples: {}, test samples: {}".format(len(df), len(test_df)))
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # 2.0; pd.concat is the supported equivalent.  reset_index() without
    # drop=True is kept on purpose: the old index becomes an 'index' column
    # that downstream feature selection explicitly excludes.
    df = pd.concat([df, test_df]).reset_index()
    # Drop the rows carrying the placeholder gender code 'XNA'.
    df = df[df['CODE_GENDER'] != 'XNA']
    # 1 = not Married/Widow; CH_FA counts children only for those applicants.
    df['New_Family_Status']=df['NAME_FAMILY_STATUS'].apply(lambda x:0 if x in ['Married','Widow'] else 1)
    df['CH_FA']=df['CNT_CHILDREN']*df['New_Family_Status']
    # Binary categoricals get a compact integer encoding.
    for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
        df[bin_feature], uniques = pd.factorize(df[bin_feature])
    df, cat_cols = one_hot_encoder(df, nan_as_category)
    # 365243 is a sentinel in DAYS_EMPLOYED; flag it, then treat as missing.
    df['DAYS_EMPLOYED_ANOM']=(df['DAYS_EMPLOYED']==365243)
    df['DAYS_EMPLOYED'].replace({365243:np.nan},inplace=True)
    # Convert day counts to (positive) years.
    df['DAYS_BIRTH'] = df['DAYS_BIRTH'].abs() / 365
    df['DAYS_EMPLOYED'] = df['DAYS_EMPLOYED'].abs() / 365
    df.rename(columns={'DAYS_BIRTH': 'YEARS_BIRTH', 'DAYS_EMPLOYED': 'YEARS_EMPLOYED'}, inplace=True)
    # Simple engineered ratios used as model features.
    df['YEARS_EMPLOYED_PREC']=df['YEARS_EMPLOYED']/df['YEARS_BIRTH']
    df['YEARS_EMPLOYED_DIF']=df['YEARS_BIRTH']-df['YEARS_EMPLOYED']
    df['INCOME_CREDIT_PERC']=df['AMT_INCOME_TOTAL']/df['AMT_CREDIT']
    df['INCOME_PER_PERSON']=df['AMT_INCOME_TOTAL']/df['CNT_FAM_MEMBERS']
    df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
    df['AVG_CREDIT_TERM'] = df['AMT_CREDIT'] / df['AMT_ANNUITY']
    del test_df
    gc.collect()
    return df
def bureau_and_balance(num_rows = None, nan_as_category = True):
    """Aggregate bureau.csv + bureau_balance.csv to one row per SK_ID_CURR.

    Monthly bureau_balance statuses are first rolled up per bureau credit
    (SK_ID_BUREAU), joined onto bureau, then everything is aggregated per
    client with extra re-aggregations for Active and Closed credits.
    """
    bureau = pd.read_csv('/kaggle/input/home-credit-default-risk/bureau.csv', nrows = num_rows)
    bb = pd.read_csv('/kaggle/input/home-credit-default-risk/bureau_balance.csv', nrows = num_rows)
    bb, bb_cat = one_hot_encoder(bb, nan_as_category)
    bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
    # Roll the monthly balance records up to one row per bureau credit.
    bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
    for col in bb_cat:
        bb_aggregations[col] = ['mean']
    bb_agg = bb.groupby('SK_ID_BUREAU' ).agg(bb_aggregations)
    bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist() ])
    bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
    bureau.drop(['SK_ID_BUREAU'], axis=1, inplace= True)
    del bb, bb_agg
    gc.collect()
    # Client-level numeric aggregation spec (reused for Active/Closed cuts).
    num_aggregations = {
        'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
        'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
        'DAYS_CREDIT_UPDATE': ['mean'],
        'CREDIT_DAY_OVERDUE': ['max', 'mean'],
        'AMT_CREDIT_MAX_OVERDUE': ['mean'],
        'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
        'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
        'AMT_CREDIT_SUM_OVERDUE': ['mean'],
        'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
        'AMT_ANNUITY': ['max', 'mean'],
        'CNT_CREDIT_PROLONG': ['sum'],
        'MONTHS_BALANCE_MIN': ['min'],
        'MONTHS_BALANCE_MAX': ['max'],
        'MONTHS_BALANCE_SIZE': ['mean', 'sum']
    }
    # Dummy columns are averaged, giving per-client category frequencies.
    cat_aggregations = {}
    for cat in bureau_cat: cat_aggregations[cat] = ['mean']
    for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
    bureau_agg = bureau.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
    bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist() ])
    # Same numeric aggregations restricted to currently active credits.
    active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
    active_agg = active.groupby('SK_ID_CURR' ).agg(num_aggregations)
    active_agg.columns = pd.Index(['ACTIVE_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist() ])
    bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
    del active, active_agg
    gc.collect()
    # ... and restricted to closed credits.
    closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
    closed_agg = closed.groupby('SK_ID_CURR' ).agg(num_aggregations)
    closed_agg.columns = pd.Index(['CLOSED_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist() ])
    bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
    del closed, closed_agg, bureau
    gc.collect()
    return bureau_agg
def previous_applications(num_rows = None, nan_as_category = True):
    """Aggregate previous_application.csv to one row per client, with extra
    re-aggregations for Approved and Refused applications."""
    prev = pd.read_csv('/kaggle/input/home-credit-default-risk/previous_application.csv', nrows = num_rows)
    prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)
    # 365243 is a sentinel in the DAYS_* columns; treat it as missing.
    prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
    prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
    prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
    prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
    prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
    # Requested amount relative to granted credit.
    # NOTE(review): AMT_CREDIT == 0 yields inf here -- confirm handling.
    prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
    num_aggregations = {
        'AMT_ANNUITY': ['min', 'max', 'mean'],
        'AMT_APPLICATION': ['min', 'max', 'mean'],
        'AMT_CREDIT': ['min', 'max', 'mean'],
        'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],
        'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],
        'AMT_GOODS_PRICE': ['min', 'max', 'mean'],
        'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
        'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
        'DAYS_DECISION': ['min', 'max', 'mean'],
        'CNT_PAYMENT': ['mean', 'sum'],
    }
    # Dummy columns are averaged, giving per-client category frequencies.
    cat_aggregations = {}
    for cat in cat_cols:
        cat_aggregations[cat] = ['mean']
    prev_agg = prev.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
    prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist() ])
    # Same numeric aggregations restricted to approved applications ...
    approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
    approved_agg = approved.groupby('SK_ID_CURR' ).agg(num_aggregations)
    approved_agg.columns = pd.Index(['APPROVED_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist() ])
    prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')
    # ... and to refused applications.
    refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
    refused_agg = refused.groupby('SK_ID_CURR' ).agg(num_aggregations)
    refused_agg.columns = pd.Index(['REFUSED_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist() ])
    prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')
    del refused, refused_agg, approved, approved_agg, prev
    gc.collect()
    return prev_agg
def pos_cash(num_rows = None, nan_as_category = True):
    """Aggregate POS_CASH_balance to one row per client (SK_ID_CURR)."""
    balance = pd.read_csv('/kaggle/input/home-credit-default-risk/POS_CASH_balance.csv', nrows = num_rows)
    # NOTE: nan_as_category is hard-wired to True here (as in the original).
    balance, dummy_cols = one_hot_encoder(balance, nan_as_category= True)
    spec = {
        'MONTHS_BALANCE': ['max', 'mean', 'size'],
        'SK_DPD': ['max', 'mean'],
        'SK_DPD_DEF': ['max', 'mean']
    }
    # Dummy columns are averaged, giving per-client status frequencies.
    spec.update({col: ['mean'] for col in dummy_cols})
    grouped = balance.groupby('SK_ID_CURR').agg(spec)
    grouped.columns = pd.Index(['POS_' + name + "_" + stat.upper() for name, stat in grouped.columns.tolist()])
    # Number of monthly snapshots seen per client.
    grouped['POS_COUNT'] = balance.groupby('SK_ID_CURR').size()
    del balance
    gc.collect()
    return grouped
def installments_payments(num_rows = None, nan_as_category = True):
    """Aggregate installments_payments.csv to one row per client."""
    ins = pd.read_csv('/kaggle/input/home-credit-default-risk/installments_payments.csv', nrows = num_rows)
    ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)
    # Paid fraction and shortfall per instalment.
    # NOTE(review): AMT_INSTALMENT == 0 yields inf/NaN here -- confirm.
    ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
    ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
    # Days past due / days before due, each clipped at zero so the two
    # columns capture lateness and earliness separately.
    ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
    ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
    ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)
    ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)
    aggregations = {
        'NUM_INSTALMENT_VERSION': ['nunique'],
        'DPD': ['max', 'mean', 'sum'],
        'DBD': ['max', 'mean', 'sum'],
        'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],
        'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],
        'AMT_INSTALMENT': ['max', 'mean', 'sum'],
        'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
        'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']
    }
    for cat in cat_cols:
        aggregations[cat] = ['mean']
    ins_agg = ins.groupby('SK_ID_CURR' ).agg(aggregations)
    ins_agg.columns = pd.Index(['INSTAL_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist() ])
    # Number of instalment records per client.
    ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR' ).size()
    del ins
    gc.collect()
    return ins_agg
def credit_card_balance(num_rows = None, nan_as_category = True):
    """Aggregate credit_card_balance.csv per client with generic statistics
    (min/max/mean/sum/var over every column)."""
    card = pd.read_csv('/kaggle/input/home-credit-default-risk/credit_card_balance.csv', nrows = num_rows)
    # NOTE: nan_as_category is hard-wired to True here (as in the original).
    card, _ = one_hot_encoder(card, nan_as_category= True)
    # The previous-loan id is irrelevant once we aggregate per client.
    card.drop(['SK_ID_PREV'], axis= 1, inplace = True)
    card_agg = card.groupby('SK_ID_CURR').agg(['min', 'max', 'mean', 'sum', 'var'])
    card_agg.columns = pd.Index(['CC_' + name + "_" + stat.upper() for name, stat in card_agg.columns.tolist()])
    # Number of monthly card records per client.
    card_agg['CC_COUNT'] = card.groupby('SK_ID_CURR').size()
    del card
    gc.collect()
    return card_agg
def kfold_lightgbm(df, num_folds, stratified = False, debug= False):
    """Train LightGBM with K-fold CV and average test predictions over folds.

    Parameters
    ----------
    df : pandas.DataFrame
        Combined features; rows with non-null TARGET are train, null are test.
    num_folds : int
        Number of CV folds.
    stratified : bool
        Use StratifiedKFold instead of plain KFold.
    debug : bool
        When False, writes the averaged test predictions to the module-global
        ``submission_file_name`` CSV.

    Returns
    -------
    pandas.DataFrame
        Per-fold feature importances (feature, importance, fold).
    """
    train_df = df[df['TARGET'].notnull() ]
    # .copy() so the later `test_df['TARGET'] = sub_preds` writes into an
    # independent frame; the original assigned into a boolean-mask slice of
    # df (chained assignment), which raises SettingWithCopyWarning and may
    # silently fail to set the column.
    test_df = df[df['TARGET'].isnull() ].copy()
    print("Starting LightGBM.Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
    del df
    gc.collect()
    if stratified:
        folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001)
    else:
        folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001)
    # Out-of-fold predictions on train and fold-averaged predictions on test.
    oof_preds = np.zeros(train_df.shape[0])
    sub_preds = np.zeros(test_df.shape[0])
    feature_importance_df = pd.DataFrame()
    # Exclude the label and every join/bookkeeping id from the feature set.
    feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]
    for n_fold,(train_idx, valid_idx)in enumerate(folds.split(train_df[feats], train_df['TARGET'])) :
        train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
        valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
        # Hyperparameters as in the original kernel (tuned externally).
        clf = LGBMClassifier(
            nthread=4,
            n_estimators=10000,
            learning_rate=0.02,
            num_leaves=34,
            colsample_bytree=0.9497036,
            subsample=0.8715623,
            max_depth=8,
            reg_alpha=0.041545473,
            reg_lambda=0.0735294,
            min_split_gain=0.0222415,
            min_child_weight=39.3259775,
            random_state=100,
            silent=-1,
            verbose=-1,)
        # n_estimators is a ceiling; early stopping on validation AUC picks
        # the effective number of trees (clf.best_iteration_).
        clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)],
            eval_metric= 'auc', verbose= 100, early_stopping_rounds= 200)
        oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]
        # Average each fold's test prediction into the final submission scores.
        sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
        fold_importance_df = pd.DataFrame()
        fold_importance_df["feature"] = feats
        fold_importance_df["importance"] = clf.feature_importances_
        fold_importance_df["fold"] = n_fold + 1
        feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
        print('Fold %2d AUC : %.6f' %(n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
        del clf, train_x, train_y, valid_x, valid_y
        gc.collect()
    print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))
    if not debug:
        # submission_file_name is a module-level global set in the __main__ guard.
        test_df['TARGET'] = sub_preds
        test_df[['SK_ID_CURR', 'TARGET']].to_csv(submission_file_name, index= False)
    display_importances(feature_importance_df)
    return feature_importance_df
def display_importances(feature_importance_df_):
    """Plot the 40 features with the highest mean importance across folds.

    Saves the horizontal bar chart to 'lgbm_importances01.png'.
    """
    # Rank features by mean importance over all folds, keep the top 40.
    mean_importance = (feature_importance_df_[["feature", "importance"]]
                       .groupby("feature")
                       .mean()
                       .sort_values(by="importance", ascending=False))
    top_features = mean_importance[:40].index
    best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(top_features)]
    plt.figure(figsize=(8, 10))
    sns.barplot(x="importance", y="feature",
                data=best_features.sort_values(by="importance", ascending=False))
    plt.title('LightGBM Features(avg over folds)')
    plt.tight_layout()
    plt.savefig('lgbm_importances01.png')
def main(debug = False):
    """Run the full pipeline: load/aggregate every table, then train LightGBM.

    debug=True limits every CSV read to 10,000 rows for a quick smoke run.
    """
    num_rows = 10000 if debug else None
    df = application_train_test(num_rows)
    # (timer label, log label, feature builder) — each builder returns a frame
    # indexed by SK_ID_CURR that is left-joined onto the application data.
    steps = [
        ("Process bureau and bureau_balance", "Bureau df shape:", bureau_and_balance),
        ("Process previous_applications", "Previous applications df shape:", previous_applications),
        ("Process POS-CASH balance", "Pos-cash balance df shape:", pos_cash),
        ("Process installments payments", "Installments payments df shape:", installments_payments),
        ("Process credit card balance", "Credit card balance df shape:", credit_card_balance),
    ]
    for timer_label, log_label, build_features in steps:
        with timer(timer_label):
            features = build_features(num_rows)
            print(log_label, features.shape)
            df = df.join(features, how='left', on='SK_ID_CURR')
            del features
            gc.collect()
    with timer("Run LightGBM with kfold"):
        feat_importance = kfold_lightgbm(df, num_folds= 5, stratified= False, debug= debug)
if __name__ == "__main__":
    # Module-level global read by kfold_lightgbm() when writing predictions.
    submission_file_name = "submission_kernel02.csv"
    with timer("Full model run"):
        main()
| Home Credit Default Risk |
4,160,759 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<define_variables> | warnings.simplefilter(action='ignore', category=FutureWarning ) | Home Credit Default Risk |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.