Dataset schema (four fields per record):
kernel_id — int64, values 24.2k to 23.3M
prompt — string, lengths 8 to 1.85M
completion — string, lengths 1 to 182k
comp_name — string, lengths 5 to 57
1,046,068
for model in all_model:
    scores = cross_val_score(model, X, Y, cv=get_kfold(), scoring='f1')
    print("Model:", model, "has f1 score:", scores.mean())<train_model>
df_bureau_balance = df_bureau_balance.pivot_table(
    index='SK_ID_BUREAU', columns='Period_status',
    values=['MONTHS_BALANCEcount', 'MONTHS_BALANCEmin',
            'MONTHS_BALANCEmax', 'MONTHS_BALANCEmean']).reset_index()
df_bureau_balance.columns = [''.join(col).strip() for col in df_bureau_balance.columns.values]
df_bureau = pd.merge(df_bureau, df_bureau_balance, how="left", on="SK_ID_BUREAU")
df_bureau = pd.get_dummies(df_bureau,
                           prefix=['CREDIT_ACTIVE', 'CREDIT_CURRENCY', 'CREDIT_TYPE'],
                           columns=['CREDIT_ACTIVE', 'CREDIT_CURRENCY', 'CREDIT_TYPE'],
                           dummy_na=True)
df_bureau.head(5)
Home Credit Default Risk
1,046,068
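The pivot-and-flatten idiom in the row above recurs throughout these kernels. A minimal, self-contained sketch on toy data (the frame and its values here are illustrative, not taken from the competition files):

import pandas as pd

# One row per (bureau loan, status); pivot to one row per loan.
toy = pd.DataFrame({
    'SK_ID_BUREAU': [1, 1, 2, 2],
    'Period_status': ['active', 'closed', 'active', 'active'],
    'MONTHS_BALANCEmean': [-3.0, -10.0, -1.0, -2.0],
})
wide = toy.pivot_table(index='SK_ID_BUREAU', columns='Period_status',
                       values=['MONTHS_BALANCEmean']).reset_index()
# pivot_table returns MultiIndex columns; join the levels into flat
# names, exactly as the kernel does before merging back onto df_bureau.
wide.columns = [''.join(col).strip() for col in wide.columns.values]
print(wide)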
<import_modules>
df_bureau_features = df_bureau.groupby(['SK_ID_CURR']).agg({
    'SK_ID_BUREAU': ['nunique'],
    'DAYS_CREDIT': ['min', 'max', 'mean', 'std'],
    'CREDIT_DAY_OVERDUE': ['min', 'max', 'mean', 'std'],
    'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean', 'std'],
    'DAYS_ENDDATE_FACT': ['min', 'max', 'mean', 'std'],
    'AMT_CREDIT_MAX_OVERDUE': ['mean', 'min', 'max'],
    'CNT_CREDIT_PROLONG': ['mean', 'min', 'max'],
    'AMT_CREDIT_SUM': ['min', 'max', 'mean', 'std'],
    'AMT_CREDIT_SUM_DEBT': ['sum', 'min', 'max'],
    'AMT_CREDIT_SUM_LIMIT': ['sum', 'min', 'max'],
    'AMT_CREDIT_SUM_OVERDUE': ['sum', 'min', 'max'],
    'DAYS_CREDIT_UPDATE': ['sum', 'min', 'max'],
    'AMT_ANNUITY': ['sum', 'min', 'max', 'mean'],
})
df_bureau_features = df_bureau_features.reset_index()
df_bureau_features.columns = [''.join(col).strip() for col in df_bureau_features.columns.values]
df_bureau_features_ohe = df_bureau[['SK_ID_CURR'] + ohe_cols].groupby(['SK_ID_CURR']).mean().reset_index()
df_bureau_features = pd.merge(df_bureau_features, df_bureau_features_ohe, how='left', on='SK_ID_CURR')
del df_bureau_features_ohe
df_train = pd.merge(df_train, df_bureau_features, how='left', on='SK_ID_CURR')
df_test = pd.merge(df_test, df_bureau_features, how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,046,068
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV<choose_model_class>
df_previous_application = pd.read_csv(".. /input/previous_application.csv") cat_cols = [col for col in df_previous_application.columns if(df_previous_application[col].dtype == object)&(( col != 'SK_ID_CURR')|(col != 'SK_ID_PREV')) ] df_previous_application = pd.get_dummies(df_previous_application,prefix = cat_cols,columns = cat_cols) df_previous_application.head()
Home Credit Default Risk
1,046,068
model = LogisticRegression()
solvers = ['newton-cg', 'lbfgs', 'liblinear']
penalty = ['l2']
c_values = [100, 10, 1.0, 0.1, 0.01]<train_on_grid>
df_POS_CASH_balance = pd.read_csv("../input/POS_CASH_balance.csv")
df_POS_CASH_balance = pd.get_dummies(df_POS_CASH_balance, columns=["NAME_CONTRACT_STATUS"])
df_POS_CASH_balance_current = df_POS_CASH_balance.drop('SK_ID_PREV', axis=1).groupby('SK_ID_CURR').mean().reset_index()
df_POS_CASH_balance_previous = df_POS_CASH_balance.drop('SK_ID_CURR', axis=1).groupby('SK_ID_PREV').mean().reset_index()
del df_POS_CASH_balance
df_POS_CASH_balance_current.head()
Home Credit Default Risk
1,046,068
grid = dict(solver=solvers, penalty=penalty, C=c_values)
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1,
                           cv=cv, scoring='f1', error_score=0, verbose=3)
grid_result = grid_search.fit(X, Y)<find_best_params>
df_installments_payments = pd.read_csv("../input/installments_payments.csv")
df_installments_payments_current = df_installments_payments.drop('SK_ID_PREV', axis=1).groupby('SK_ID_CURR').mean().reset_index()
df_installments_payments_previous = df_installments_payments.drop('SK_ID_CURR', axis=1).groupby('SK_ID_PREV').mean().reset_index()
del df_installments_payments
df_installments_payments_current.head()
Home Credit Default Risk
1,046,068
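The grid search in the row above is easiest to read in isolation. A compact, runnable sketch of the same RepeatedStratifiedKFold recipe, with make_classification standing in for the real feature matrix:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, RepeatedStratifiedKFold

X, Y = make_classification(n_samples=500, n_features=10, random_state=1)
grid = {'solver': ['newton-cg', 'lbfgs', 'liblinear'],
        'penalty': ['l2'],
        'C': [100, 10, 1.0, 0.1, 0.01]}
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=1)
# 5 folds x 3 repeats = 15 fits per parameter combination.
search = GridSearchCV(LogisticRegression(max_iter=1000), grid,
                      scoring='f1', cv=cv, n_jobs=-1, error_score=0)
result = search.fit(X, Y)
print("Best: %f using %s" % (result.best_score_, result.best_params_))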
print("Best: %f using %s" %(grid_result.best_score_, grid_result.best_params_)) means = grid_result.cv_results_['mean_test_score'] stds = grid_result.cv_results_['std_test_score'] params = grid_result.cv_results_['params'] for mean, stdev, param in zip(means, stds, params): print("%f(%f)with: %r" %(mean, stdev, param))<choose_model_class>
df_credit_card_balance = pd.read_csv("../input/credit_card_balance.csv")
df_credit_card_balance = pd.get_dummies(df_credit_card_balance, columns=['NAME_CONTRACT_STATUS'])
df_credit_card_balance_current = df_credit_card_balance.drop('SK_ID_PREV', axis=1).groupby('SK_ID_CURR').mean().reset_index()
df_credit_card_balance_previous = df_credit_card_balance.drop('SK_ID_CURR', axis=1).groupby('SK_ID_PREV').mean().reset_index()
del df_credit_card_balance
df_credit_card_balance_current.head()
Home Credit Default Risk
1,046,068
model = LogisticRegression(C=1.0, penalty='l2', solver='newton-cg')<compute_train_metric>
df_train = df_train.merge(df_POS_CASH_balance_current, on='SK_ID_CURR', how='left', suffixes=['', '_POS_bal_curr'])\
    .merge(df_installments_payments_current, on='SK_ID_CURR', how='left', suffixes=['', '_installments_curr'])\
    .merge(df_credit_card_balance_current, on='SK_ID_CURR', how='left', suffixes=['', '_credit_card_bal_curr'])
df_test = df_test.merge(df_POS_CASH_balance_current, on='SK_ID_CURR', how='left', suffixes=['', '_POS_bal_curr'])\
    .merge(df_installments_payments_current, on='SK_ID_CURR', how='left', suffixes=['', '_installments_curr'])\
    .merge(df_credit_card_balance_current, on='SK_ID_CURR', how='left', suffixes=['', '_credit_card_bal_curr'])
df_train.head()
Home Credit Default Risk
1,046,068
scores = cross_val_score(model, X, Y, cv=get_kfold(), scoring='f1')
print(scores.mean())<train_model>
df_previous_application = df_previous_application.merge(df_POS_CASH_balance_previous, on='SK_ID_PREV', how='left', suffixes=['', '_POS_bal_past'])\
    .merge(df_installments_payments_previous, on='SK_ID_PREV', how='left', suffixes=['', '_installments_past'])\
    .merge(df_credit_card_balance_previous, on='SK_ID_PREV', how='left', suffixes=['', '_credit_card_bal_past'])
df_previous_application = df_previous_application.drop("SK_ID_PREV", axis=1).groupby(['SK_ID_CURR']).mean().reset_index()
df_train = df_train.merge(df_previous_application, on='SK_ID_CURR', how='left', suffixes=('', '_past_appl'))
df_test = df_test.merge(df_previous_application, on='SK_ID_CURR', how='left', suffixes=('', '_past_appl'))
df_previous_application.head()
del df_previous_application
Home Credit Default Risk
1,046,068
model = model.fit(X, Y)<drop_column>
df1 = df_train.sample(frac=1)
msk = np.random.rand(len(df1))
eval_set = df1[msk >= 0.95]
train = df1[msk < 0.95]
X_train = train.drop(['TARGET', 'SK_ID_CURR'], axis=1)
Y_train = train['TARGET']
X_eval = eval_set.drop(['TARGET', 'SK_ID_CURR'], axis=1)
Y_eval = eval_set['TARGET']

def xgb_(X_train=X_train, Y_train=Y_train,
         params={"objective": ['multi:softmax'], "max_depth": [2], 'eta': [0.1]},
         fit_params={'eval_metric': ['mlogloss'], 'eval_set': [(X_eval, Y_eval)]},
         X_eval=X_eval, Y_eval=Y_eval):
    print()
    model_xgb = xgb.XGBClassifier()
    # fit_params as a GridSearchCV kwarg is older scikit-learn; newer versions pass fit kwargs to fit()
    clf = GridSearchCV(model_xgb, param_grid=params, fit_params=fit_params, cv=3, scoring="roc_auc")
    clf.fit(X_train, Y_train)
    print("Best parameters set found on development set:")
    print()
    print(clf.best_params_)
    print()
    print("Grid scores on development set:")
    print()
    means = clf.cv_results_['mean_test_score']
    stds = clf.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, clf.cv_results_['params']):
        print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
    print()
    print("Detailed classification report:")
    print()
    print("The model is trained on the full development set.")
    print("The scores are computed on the full evaluation set.")
    print()
    y_true, y_pred = Y_eval, clf.predict(X_eval)
    print(classification_report(y_true, y_pred))
    print()
    print(confusion_matrix(y_true, y_pred))
    print()
    model_xgb = clf.best_estimator_
    xgb.plot_importance(model_xgb, max_num_features=20)
    Y_dev_pred = model_xgb.predict_proba(X_eval)[:, 1]
    score = roc_auc_score(Y_eval, Y_dev_pred)
    results = model_xgb.evals_result()
    metrics = fit_params['eval_metric']
    epochs = len(results['validation_0'][metrics[0]])
    x_axis = range(0, epochs)
    for metric in metrics:
        fig, ax = pyplot.subplots()
        ax.plot(x_axis, results['validation_0'][metric], label='Train')
        ax.plot(x_axis, results['validation_1'][metric], label='Validation/Hold out set')
        ax.legend()
        pyplot.ylabel('%s' % metric)
        pyplot.title('XGBoost %s' % metric)
        pyplot.show()
    return model_xgb, score, clf, results, model_xgb.feature_importances_
Home Credit Default Risk
1,046,068
test = test.drop(['isChurned'], axis=1)<predict_on_test>
xgb_params = {'learning_rate': [0.02], 'reg_lambda': [16], 'max_depth': [9],
              'silent': [False], 'n_estimators': [1000], 'colsample_bytree': [0.5],
              'nthread': [-1], 'subsample': [0.5], 'objective': ["binary:logistic"],
              'scale_pos_weight': [2]}
fit_params = {'eval_metric': ['auc'],
              'eval_set': [(X_train, Y_train), (X_eval, Y_eval)],
              'early_stopping_rounds': 30}
model_xgb, score_xgb, xgb_gridsearch, results, feats = xgb_(X_train=X_train, X_eval=X_eval,
                                                            params=xgb_params, fit_params=fit_params)
Home Credit Default Risk
1,046,068
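The fit_params pattern above (eval_set plus early_stopping_rounds) is the older XGBoost sklearn-wrapper style. A minimal sketch of the same idea against the newer API, assuming xgboost >= 1.6 where early_stopping_rounds and eval_metric moved into the constructor; data and parameter values are purely illustrative:

import xgboost as xgb
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=2000, n_features=20, random_state=0)
X_tr, X_ev, y_tr, y_ev = train_test_split(X, y, test_size=0.2, random_state=0)
clf = xgb.XGBClassifier(n_estimators=1000, learning_rate=0.02, max_depth=9,
                        subsample=0.5, colsample_bytree=0.5,
                        objective='binary:logistic', eval_metric='auc',
                        early_stopping_rounds=30)  # constructor kwarg in xgboost >= 1.6
clf.fit(X_tr, y_tr, eval_set=[(X_tr, y_tr), (X_ev, y_ev)], verbose=False)
print('stopped at iteration:', clf.best_iteration)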
prediction = model.predict(test)<data_type_conversions>
X_test = df_test.drop(['TARGET', 'SK_ID_CURR'], axis=1)
Y_test = model_xgb.predict_proba(X_test)[:, 1]
Home Credit Default Risk
1,046,068
<count_values><EOS>
df_application_test["TARGET"] = Y_test df_submit = df_application_test[["SK_ID_CURR","TARGET"]] df_submit.to_csv('submission_appl_bureau.csv', index=False )
Home Credit Default Risk
1,025,021
<SOS> metric: AUC Kaggle data source: home-credit-default-risk<save_to_csv>
import gc
import warnings

warnings.filterwarnings('ignore')
gc.enable()
Home Credit Default Risk
1,025,021
sample_submission.to_csv("submission.csv", index=False)<import_modules>
print('Importing data...')
lgbm_submission = pd.read_csv('../input/sample_submission.csv')
Home Credit Default Risk
1,025,021
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler<load_from_csv>
buro_bal = pd.read_csv('../input/bureau_balance.csv')
print('Buro bal shape : ', buro_bal.shape)
print('transform to dummies')
buro_bal = pd.concat([buro_bal, pd.get_dummies(buro_bal.STATUS, prefix='buro_bal_status')], axis=1).drop('STATUS', axis=1)
print('Counting buros')
buro_counts = buro_bal[['SK_ID_BUREAU', 'MONTHS_BALANCE']].groupby('SK_ID_BUREAU').count()
buro_bal['buro_count'] = buro_bal['SK_ID_BUREAU'].map(buro_counts['MONTHS_BALANCE'])
print('averaging buro bal')
avg_buro_bal = buro_bal.groupby('SK_ID_BUREAU').mean()
avg_buro_bal.columns = ['avg_buro_' + f_ for f_ in avg_buro_bal.columns]
del buro_bal
gc.collect()
print('Read Bureau')
buro = pd.read_csv('../input/bureau.csv')
print('Go to dummies')
buro_credit_active_dum = pd.get_dummies(buro.CREDIT_ACTIVE, prefix='ca_')
buro_credit_currency_dum = pd.get_dummies(buro.CREDIT_CURRENCY, prefix='cu_')
buro_credit_type_dum = pd.get_dummies(buro.CREDIT_TYPE, prefix='ty_')
buro_full = pd.concat([buro, buro_credit_active_dum, buro_credit_currency_dum, buro_credit_type_dum], axis=1)
del buro_credit_active_dum, buro_credit_currency_dum, buro_credit_type_dum
gc.collect()
print('Merge with buro avg')
buro_full = buro_full.merge(right=avg_buro_bal.reset_index(), how='left', on='SK_ID_BUREAU', suffixes=('', '_bur_bal'))
print('Counting buro per SK_ID_CURR')
nb_bureau_per_curr = buro_full[['SK_ID_CURR', 'SK_ID_BUREAU']].groupby('SK_ID_CURR').count()
buro_full['SK_ID_BUREAU'] = buro_full['SK_ID_CURR'].map(nb_bureau_per_curr['SK_ID_BUREAU'])
print('Averaging bureau')
avg_buro = buro_full.groupby('SK_ID_CURR').mean()
del buro, buro_full
gc.collect()
print('Read prev')
prev = pd.read_csv('../input/previous_application.csv')
prev_cat_features = [f_ for f_ in prev.columns if prev[f_].dtype == 'object']
print('Go to dummies')
prev_dum = pd.DataFrame()
for f_ in prev_cat_features:
    prev_dum = pd.concat([prev_dum, pd.get_dummies(prev[f_], prefix=f_).astype(np.uint8)], axis=1)
prev = pd.concat([prev, prev_dum], axis=1)
del prev_dum
gc.collect()
print('Counting number of Prevs')
nb_prev_per_curr = prev[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
prev['SK_ID_PREV'] = prev['SK_ID_CURR'].map(nb_prev_per_curr['SK_ID_PREV'])
print('Averaging prev')
avg_prev = prev.groupby('SK_ID_CURR').mean()
del prev
gc.collect()
print('Reading POS_CASH')
pos = pd.read_csv('../input/POS_CASH_balance.csv')
print('Go to dummies')
pos = pd.concat([pos, pd.get_dummies(pos['NAME_CONTRACT_STATUS'])], axis=1)
print('Compute nb of prevs per curr')
nb_prevs = pos[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
pos['SK_ID_PREV'] = pos['SK_ID_CURR'].map(nb_prevs['SK_ID_PREV'])
print('Go to averages')
avg_pos = pos.groupby('SK_ID_CURR').mean()
del pos, nb_prevs
gc.collect()
print('Reading CC balance')
cc_bal = pd.read_csv('../input/credit_card_balance.csv')
print('Go to dummies')
cc_bal = pd.concat([cc_bal, pd.get_dummies(cc_bal['NAME_CONTRACT_STATUS'], prefix='cc_bal_status_')], axis=1)
nb_prevs = cc_bal[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
cc_bal['SK_ID_PREV'] = cc_bal['SK_ID_CURR'].map(nb_prevs['SK_ID_PREV'])
print('Compute average')
avg_cc_bal = cc_bal.groupby('SK_ID_CURR').mean()
avg_cc_bal.columns = ['cc_bal_' + f_ for f_ in avg_cc_bal.columns]
del cc_bal, nb_prevs
gc.collect()
print('Reading Installments')
inst = pd.read_csv('../input/installments_payments.csv')
nb_prevs = inst[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
inst['SK_ID_PREV'] = inst['SK_ID_CURR'].map(nb_prevs['SK_ID_PREV'])
avg_inst = inst.groupby('SK_ID_CURR').mean()
avg_inst.columns = ['inst_' + f_ for f_ in avg_inst.columns]
print('Read data and test')
data = pd.read_csv('../input/application_train.csv')
test = pd.read_csv('../input/application_test.csv')
print('Shapes : ', data.shape, test.shape)
y = data['TARGET']
del data['TARGET']
categorical_feats = [f for f in data.columns if data[f].dtype == 'object']
categorical_feats
for f_ in categorical_feats:
    data[f_], indexer = pd.factorize(data[f_])
    test[f_] = indexer.get_indexer(test[f_])
data = data.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_pos.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_pos.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_cc_bal.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_cc_bal.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_inst.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_inst.reset_index(), how='left', on='SK_ID_CURR')
del avg_buro, avg_prev
gc.collect()
Home Credit Default Risk
1,025,021
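The factorize/get_indexer trick in the row above is what keeps train and test integer codes consistent. A tiny self-contained demonstration with toy labels (the 'XNA' value illustrates a label unseen in train):

import pandas as pd

train = pd.DataFrame({'NAME_CONTRACT_TYPE': ['Cash', 'Revolving', 'Cash']})
test = pd.DataFrame({'NAME_CONTRACT_TYPE': ['Revolving', 'Cash', 'XNA']})
# Factorize on train, then reuse the returned uniques on test so both
# frames share one integer coding; labels unseen in train become -1.
train['NAME_CONTRACT_TYPE'], indexer = pd.factorize(train['NAME_CONTRACT_TYPE'])
test['NAME_CONTRACT_TYPE'] = indexer.get_indexer(test['NAME_CONTRACT_TYPE'])
print(train['NAME_CONTRACT_TYPE'].tolist(), test['NAME_CONTRACT_TYPE'].tolist())
# -> [0, 1, 0] [1, 0, -1]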
path_train = '../input/seleksidukungaib/train.csv'
path_test = '../input/seleksidukungaib/test.csv'
train_data = pd.read_csv(path_train)
test_data = pd.read_csv(path_test)<count_values>
train_x, valid_x, train_y, valid_y = train_test_split(data, y, test_size=0.2, shuffle=True, stratify=y, random_state=1301)
Home Credit Default Risk
1,025,021
train_data['isChurned'].value_counts()<count_missing_values>
train_data = lgb.Dataset(train_x, label=train_y)
valid_data = lgb.Dataset(valid_x, label=valid_y)
Home Credit Default Risk
1,025,021
train_data.isna().sum()<drop_column>
params = {'metric': 'auc', 'boosting_type': 'gbdt', 'colsample_bytree': 0.9234,
          'num_leaves': 13, 'max_depth': -1, 'n_estimators': 200,
          'min_child_samples': 399, 'min_child_weight': 0.1, 'reg_alpha': 2,
          'reg_lambda': 5, 'subsample': 0.855, 'verbose': -1, 'num_threads': 4}
Home Credit Default Risk
1,025,021
train_data = train_data.dropna(axis=0)<count_missing_values>
lgbm = lgb.train(params, train_data, 2500, valid_sets=valid_data,
                 early_stopping_rounds=30, verbose_eval=10)
Home Credit Default Risk
1,025,021
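The lgb.train call above uses the older early_stopping_rounds/verbose_eval keyword style; LightGBM >= 4 moved both into callbacks. A minimal sketch on synthetic data, assuming a recent LightGBM:

import lightgbm as lgb
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=3000, n_features=30, random_state=1301)
X_tr, X_v, y_tr, y_v = train_test_split(X, y, test_size=0.2, stratify=y, random_state=1301)
dtrain = lgb.Dataset(X_tr, label=y_tr)
dvalid = lgb.Dataset(X_v, label=y_v)
params = {'objective': 'binary', 'metric': 'auc', 'verbose': -1}
# early_stopping(30) and log_evaluation(10) replace the old keywords.
booster = lgb.train(params, dtrain, num_boost_round=2500, valid_sets=[dvalid],
                    callbacks=[lgb.early_stopping(30), lgb.log_evaluation(10)])
print('best iteration:', booster.best_iteration)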
train_data.isna().sum()<concatenate>
y_hat = lgbm.predict(data)
score = roc_auc_score(y, y_hat)
print("Overall AUC: {:.3f}".format(score))
Home Credit Default Risk
1,025,021
data = pd.concat([train_data, test_data], ignore_index=True)<categorify>
import shap
Home Credit Default Risk
1,025,021
date = ['date_collected', 'date']
bin = ['premium', 'super', 'pinEnabled']  # note: shadows the built-in bin()
col = date + bin
le = LabelEncoder()
for i in col:
    data[i] = le.fit_transform(list(data[i].values))<split>
%time shap_values = shap.TreeExplainer(lgbm).shap_values(valid_x)
Home Credit Default Risk
1,025,021
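A self-contained sketch of the SHAP call above, from fit to summary plot (toy data; note that for binary LightGBM models some shap versions return a per-class list):

import lightgbm as lgb
import shap
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=500, n_features=8, random_state=0)
model = lgb.LGBMClassifier(n_estimators=50).fit(X, y)
shap_values = shap.TreeExplainer(model).shap_values(X)
# Some shap versions return [class0, class1] for binary LightGBM models.
vals = shap_values[1] if isinstance(shap_values, list) else shap_values
shap.summary_plot(vals, X)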
train_data = data[~data.isChurned.isnull()]
test_data = data[data.isChurned.isnull()]<prepare_x_and_y>
data['CODE_GENDER'].value_counts()
Home Credit Default Risk
1,025,021
features = ['date_collected', 'num_topup_trx', 'num_recharge_trx', 'isActive',
            'isVerifiedEmail', 'blocked', 'premium', 'super', 'userLevel', 'pinEnabled']
X = train_data[features]
y = train_data['isChurned']<train_model>
data[data['CODE_GENDER']==2]
Home Credit Default Risk
1,025,021
model = LogisticRegression()
model.fit(X, y)<predict_on_test>
test['CODE_GENDER'].value_counts()
Home Credit Default Risk
1,025,021
pred = model.predict(test_data[features])<save_to_csv>
sub_pred = lgbm.predict(test)
Home Credit Default Risk
1,025,021
submission = pd.DataFrame({'idx': test_data['idx'], 'isChurned': pred.astype(int)})
submission.to_csv('submission.csv', index=False)<import_modules>
sub_pred = np.clip(sub_pred, 0, 1)
Home Credit Default Risk
1,025,021
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# in scikit-learn >= 0.23 this is plain "import joblib"
from sklearn.externals import joblib<set_options>
lgbm_submission.TARGET = sub_pred
lgbm_submission.to_csv('subm_lgbm_auc{:.8f}.csv'.format(score), index=False, float_format='%.8f')
Home Credit Default Risk
993,573
plt.rcParams['figure.figsize'] = (10, 8)<load_from_csv>
app_train = pd.read_csv(path + "application_train.csv")
app_train.head()
Home Credit Default Risk
993,573
train_og = pd.read_csv('/kaggle/input/equipfails/equip_failures_training_set.csv')<load_from_csv>
bureau = pd.read_csv(path + "bureau.csv")
bureau.head()
Home Credit Default Risk
993,573
test_og = pd.read_csv('/kaggle/input/equipfails/equip_failures_test_set.csv')<concatenate>
bureau_balance = pd.read_csv(path + "bureau_balance.csv")
bureau_balance.head()
Home Credit Default Risk
993,573
data = pd.concat([train_og, test_og], ignore_index=True, sort=False)<define_variables>
credit_card_balance = pd.read_csv(path + "credit_card_balance.csv")
credit_card_balance.head()
Home Credit Default Risk
993,573
data = data.replace('na', np.nan)<data_type_conversions>
pcb = pd.read_csv(path + "POS_CASH_balance.csv")
pcb.head()
Home Credit Default Risk
993,573
for c in data.columns:
    data[c] = pd.to_numeric(data[c])<correct_missing_values>
previous_application = pd.read_csv(path + "previous_application.csv")
previous_application.head()
Home Credit Default Risk
993,573
data_nonna = data.iloc[:, 2:].fillna(data.mean())<normalization>
installments_payments = pd.read_csv(path + "installments_payments.csv")
installments_payments.head()
Home Credit Default Risk
993,573
scaler = MinMaxScaler()<train_model>
app_test = pd.read_csv('../input/application_test.csv')
app_test['is_test'] = 1
app_test['is_train'] = 0
app_train['is_test'] = 0
app_train['is_train'] = 1
Y = app_train['TARGET']
train_X = app_train.drop(['TARGET'], axis=1)
test_id = app_test['SK_ID_CURR']
test_X = app_test
data = pd.concat([train_X, test_X], axis=0)
Home Credit Default Risk
993,573
scaler.fit(data_nonna)<normalization>
def _get_categorical_features(df):
    feats = [col for col in list(df.columns) if df[col].dtype == 'object']
    return feats

def _factorize_categoricals(df, cats):
    for col in cats:
        df[col], _ = pd.factorize(df[col])
    return df

def _get_dummies(df, cats):
    for col in cats:
        df = pd.concat([df, pd.get_dummies(df[col], prefix=col)], axis=1)
    return df

data_cats = _get_categorical_features(data)
prev_app_cats = _get_categorical_features(previous_application)
bureau_cats = _get_categorical_features(bureau)
pcb_cats = _get_categorical_features(pcb)
ccbal_cats = _get_categorical_features(credit_card_balance)
previous_application = _get_dummies(previous_application, prev_app_cats)
bureau = _get_dummies(bureau, bureau_cats)
pcb = _get_dummies(pcb, pcb_cats)
credit_card_balance = _get_dummies(credit_card_balance, ccbal_cats)
data = _factorize_categoricals(data, data_cats)
Home Credit Default Risk
993,573
X = scaler.transform(data_nonna)<prepare_x_and_y>
prev_apps_count = previous_application[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
previous_application['SK_ID_PREV'] = previous_application['SK_ID_CURR'].map(prev_apps_count['SK_ID_PREV'])
prev_apps_avg = previous_application.groupby('SK_ID_CURR').mean()
prev_apps_avg.columns = ['p_' + col for col in prev_apps_avg.columns]
data = data.merge(right=prev_apps_avg.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
993,573
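The count-then-map pattern above (overwrite the child id with a per-client count so a later groupby().mean() yields a count feature) is compact but easy to misread. A toy walk-through with made-up values:

import pandas as pd

prev = pd.DataFrame({'SK_ID_CURR': [1, 1, 2],
                     'SK_ID_PREV': [10, 11, 12],
                     'AMT_CREDIT': [100.0, 200.0, 50.0]})
counts = prev.groupby('SK_ID_CURR')['SK_ID_PREV'].count()
# Overwrite the child id with the per-client count ...
prev['SK_ID_PREV'] = prev['SK_ID_CURR'].map(counts)
# ... so the mean of SK_ID_PREV is now "number of previous applications".
print(prev.groupby('SK_ID_CURR').mean())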
y = data.target.values<define_variables>
bureau_avg = bureau.groupby('SK_ID_CURR').mean()
bureau_avg['buro_count'] = bureau[['SK_ID_BUREAU', 'SK_ID_CURR']].groupby('SK_ID_CURR').count()['SK_ID_BUREAU']
bureau_avg.columns = ['b_' + f_ for f_ in bureau_avg.columns]
data = data.merge(right=bureau_avg.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
993,573
m = train_og.shape[0]<split>
cnt_inst = installments_payments[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
installments_payments['SK_ID_PREV'] = installments_payments['SK_ID_CURR'].map(cnt_inst['SK_ID_PREV'])
avg_inst = installments_payments.groupby('SK_ID_CURR').mean()
avg_inst.columns = ['i_' + f_ for f_ in avg_inst.columns]
data = data.merge(right=avg_inst.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
993,573
X_train, X_valid, y_train, y_valid = train_test_split(X[:m], y[:m], test_size=0.3, random_state=0)<count_values>
pcb_count = pcb[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
pcb['SK_ID_PREV'] = pcb['SK_ID_CURR'].map(pcb_count['SK_ID_PREV'])
pcb_avg = pcb.groupby('SK_ID_CURR').mean()
data = data.merge(right=pcb_avg.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
993,573
train_og.target.value_counts(normalize=True)<predict_on_test>
nb_prevs = credit_card_balance[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
credit_card_balance['SK_ID_PREV'] = credit_card_balance['SK_ID_CURR'].map(nb_prevs['SK_ID_PREV'])
avg_cc_bal = credit_card_balance.groupby('SK_ID_CURR').mean()
avg_cc_bal.columns = ['cc_bal_' + f_ for f_ in avg_cc_bal.columns]
data = data.merge(right=avg_cc_bal.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
993,573
def get_f1_score(clf, X_train, X_valid, y_train, y_valid):
    y_train_preds = clf.predict(X_train)
    train_f1_score = f1_score(y_pred=y_train_preds, y_true=y_train)
    y_valid_preds = clf.predict(X_valid)
    valid_f1_score = f1_score(y_pred=y_valid_preds, y_true=y_valid)
    return train_f1_score, valid_f1_score<train_on_grid>
ignore_features = ['SK_ID_CURR', 'is_train', 'is_test']
relevant_features = [col for col in data.columns if col not in ignore_features]
trainX = data[data['is_train'] == 1][relevant_features]
testX = data[data['is_test'] == 1][relevant_features]
Home Credit Default Risk
993,573
lrclassifier = LogisticRegression()
lrclassifier.fit(X_train, y_train)<compute_test_metric>
x_train, x_val, y_train, y_val = train_test_split(trainX, Y, test_size=0.2, random_state=18)
lgb_train = lgb.Dataset(data=x_train, label=y_train)
lgb_eval = lgb.Dataset(data=x_val, label=y_val)
Home Credit Default Risk
993,573
get_f1_score(lrclassifier, X_train, X_valid, y_train, y_valid)<define_variables>
params = {'task': 'train', 'boosting_type': 'gbdt', 'objective': 'binary',
          'metric': 'auc', 'learning_rate': 0.01, 'num_leaves': 48,
          'num_iteration': 5000, 'verbose': 0, 'colsample_bytree': .8,
          'subsample': .9, 'max_depth': 7, 'reg_alpha': .1, 'reg_lambda': .1,
          'min_split_gain': .01, 'min_child_weight': 1}
model = lgb.train(params, lgb_train, valid_sets=lgb_eval,
                  early_stopping_rounds=150, verbose_eval=200)
Home Credit Default Risk
993,573
def select_sample(mult):
    sample_size = mult * y_train[y_train == 1].shape[0]
    np.random.seed(2)
    sample_idx = np.concatenate([np.random.choice(np.where(y_train == 0)[0], size=sample_size),
                                 np.where(y_train == 1)[0]])
    X_train_sample = X_train[sample_idx]
    y_train_sample = y_train[sample_idx]
    return X_train_sample, y_train_sample<train_model>
preds = model.predict(testX)
sub_lgb = pd.DataFrame()
sub_lgb['SK_ID_CURR'] = test_id
sub_lgb['TARGET'] = preds
sub_lgb.to_csv("lgb_baseline.csv", index=False)
sub_lgb.head()
Home Credit Default Risk
1,499,104
def get_f1_by_sample_lr(mult, is_sample=True):
    if is_sample:
        X_train_sample, y_train_sample = select_sample(mult)
    else:
        X_train_sample, y_train_sample = X_train, y_train
    lrclassifier = LogisticRegression()
    lrclassifier.fit(X_train_sample, y_train_sample)
    train_f1_score, valid_f1_score = get_f1_score(lrclassifier, X_train_sample, X_valid,
                                                  y_train_sample, y_valid)
    return {'sample_frac': X_train_sample.shape[0] / X_train.shape[0],
            'train_f1_score': train_f1_score,
            'valid_f1_score': valid_f1_score}<define_variables>
df = pd.read_pickle('../input/save-dromosys-features/df.pkl.gz')
print("Raw shape: ", df.shape)
df.set_index('SK_ID_CURR', inplace=True)
y = df['TARGET'].copy()
feats = [f for f in df.columns if f not in ['TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index']]
df.drop(['index', 'TARGET'], axis=1, inplace=True)
print("X shape: ", df.shape, " y shape:", y.shape)
print("Preparing data...")
for feat in feats:
    df[feat] = df[feat].fillna(df[feat].mean())
Home Credit Default Risk
1,499,104
num_mults = int(sum(y_train==0)/sum(y_train==1))<categorify>
def rank_gauss(x):
    N = x.shape[0]
    temp = x.argsort()
    rank_x = temp.argsort() / N
    rank_x -= rank_x.mean()
    rank_x *= 2
    efi_x = erfinv(rank_x)
    efi_x -= efi_x.mean()
    return efi_x
Home Credit Default Risk
1,499,104
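The rank_gauss transform above assumes erfinv has already been imported from scipy. A self-contained demo on deliberately skewed toy data, showing the output is roughly zero-mean and Gaussian-shaped:

import numpy as np
from scipy.special import erfinv

def rank_gauss(x):
    # Rank-transform into (-1, 1), then map through the inverse error
    # function so the result is approximately Gaussian.
    rank = x.argsort().argsort() / x.shape[0]
    rank -= rank.mean()
    rank *= 2
    out = erfinv(rank)
    return out - out.mean()

x = np.random.exponential(size=1000)          # heavily right-skewed input
z = rank_gauss(x)
print(round(z.mean(), 3), round(z.std(), 3))  # ~0 mean, roughly unit spread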
f1_mult_lr = []
mult = 1
while mult < num_mults:
    f1_mult_lr.append(get_f1_by_sample_lr(mult))
    if mult == 1:
        mult += 9
    else:
        mult += 10
# mult is ignored when is_sample=False
f1_mult_lr.append(get_f1_by_sample_lr(None, is_sample=False))<normalization>
for i in df.columns:
    df[i] = rank_gauss(df[i].values)
Home Credit Default Risk
1,499,104
f1_mult_lr_df = pd.io.json.json_normalize(f1_mult_lr)<train_model>
training = y.notnull()
testing = y.isnull()
X_train = df[training].values
X_test = df[testing].values
y_train = np.array(y[training])
print(X_train.shape, X_test.shape, y_train.shape)
gc.collect()
Home Credit Default Risk
1,499,104
def get_f1_by_sample_rf(mult, is_sample=True):
    if is_sample:
        X_train_sample, y_train_sample = select_sample(mult)
    else:
        X_train_sample, y_train_sample = X_train, y_train
    rfclassifier = RandomForestClassifier(n_estimators=50, max_features=10)
    rfclassifier.fit(X_train_sample, y_train_sample)
    train_f1_score, valid_f1_score = get_f1_score(rfclassifier, X_train_sample, X_valid,
                                                  y_train_sample, y_valid)
    return {'sample_frac': X_train_sample.shape[0] / X_train.shape[0],
            'train_f1_score': train_f1_score,
            'valid_f1_score': valid_f1_score}<concatenate>
class IntervalEvaluation(Callback):
    def __init__(self, validation_data=(), interval=10):
        super(Callback, self).__init__()
        self.interval = interval
        self.X_val, self.y_val = validation_data

    def on_epoch_end(self, epoch, logs={}):
        y_pred = self.model.predict_proba(self.X_val, verbose=0)
        score = roc_auc_score(self.y_val, y_pred)
        logging.info("interval evaluation - epoch: {:d} - score: {:.6f}".format(epoch, score))
        print("interval evaluation - epoch: {:d} - score: {:.6f}".format(epoch, score))
        logs['val_auc'] = score
Home Credit Default Risk
1,499,104
f1_mult_rf = []
mult = 1
while mult < num_mults:
    f1_mult_rf.append(get_f1_by_sample_rf(mult))
    if mult == 1:
        mult += 9
    else:
        mult += 10
# mult is ignored when is_sample=False
f1_mult_rf.append(get_f1_by_sample_rf(None, is_sample=False))<normalization>
n_folds = 10
folds = KFold(n_splits=n_folds, shuffle=True, random_state=42)
sub_preds = np.zeros(X_test.shape[0])
oof_preds = np.zeros(X_train.shape[0])
for n_fold, (trn_idx, val_idx) in enumerate(folds.split(X_train)):
    trn_x, trn_y = X_train[trn_idx], y_train[trn_idx]
    val_x, val_y = X_train[val_idx], y_train[val_idx]
    earlystop = EarlyStopping(monitor='val_auc', min_delta=0, patience=3, verbose=0, mode='max')
    file_path = "fold " + str(n_fold + 1) + " best_model.hdf5"
    check_point = ModelCheckpoint(file_path, monitor="val_auc", verbose=1, save_best_only=True, mode="max")
    print('Setting up neural network...')
    nn = Sequential()
    nn.add(Dense(units=400, kernel_initializer='normal', input_dim=df.shape[1]))
    nn.add(PReLU())
    nn.add(Dropout(.3))
    nn.add(Dense(units=160, kernel_initializer='normal'))
    nn.add(PReLU())
    nn.add(BatchNormalization())
    nn.add(Dropout(.3))
    nn.add(Dense(units=64, kernel_initializer='normal'))
    nn.add(PReLU())
    nn.add(BatchNormalization())
    nn.add(Dropout(.3))
    nn.add(Dense(units=26, kernel_initializer='normal'))
    nn.add(PReLU())
    nn.add(BatchNormalization())
    nn.add(Dropout(.3))
    nn.add(Dense(units=12, kernel_initializer='normal'))
    nn.add(PReLU())
    nn.add(BatchNormalization())
    nn.add(Dropout(.3))
    nn.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
    nn.compile(loss='binary_crossentropy', optimizer='adam')
    print('Fitting neural network...')
    ival = IntervalEvaluation(validation_data=(val_x, val_y), interval=10)
    nn.fit(trn_x, trn_y, validation_data=(val_x, val_y), epochs=20, verbose=0,
           callbacks=[ival, earlystop, check_point], batch_size=128)
    best_model = load_model(file_path)
    oof_preds[val_idx] = best_model.predict(val_x).flatten()
    print(roc_auc_score(val_y, oof_preds[val_idx]))
    print('Predicting...')
    sub_preds += best_model.predict(X_test).flatten().clip(0, 1) / folds.n_splits
    gc.collect()
print('FULL AUC: {}'.format(roc_auc_score(y_train, oof_preds)))
Home Credit Default Risk
1,499,104
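Stripped of the Keras specifics, the fold loop above is a standard out-of-fold scheme: each fold's validation predictions fill an OOF vector, and test predictions are averaged across folds. A minimal sklearn sketch of just that skeleton (stand-in data; the first 100 rows double as a fake test block purely for illustration):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import KFold

X, y = make_classification(n_samples=1000, n_features=15, random_state=42)
X_test = X[:100]          # stand-in "test" block, purely for illustration
folds = KFold(n_splits=5, shuffle=True, random_state=42)
oof_preds = np.zeros(len(y))
sub_preds = np.zeros(len(X_test))
for trn_idx, val_idx in folds.split(X):
    model = LogisticRegression(max_iter=1000).fit(X[trn_idx], y[trn_idx])
    oof_preds[val_idx] = model.predict_proba(X[val_idx])[:, 1]
    # Average each fold's test prediction, as the kernel does with sub_preds.
    sub_preds += model.predict_proba(X_test)[:, 1] / folds.n_splits
print('FULL AUC: {:.3f}'.format(roc_auc_score(y, oof_preds)))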
f1_mult_rf_df = pd.io.json.json_normalize(f1_mult_rf)<train_model>
print('Saving results...')
sub = pd.DataFrame()
sub_train = pd.DataFrame()
sub['SK_ID_CURR'] = df[testing].index
sub['TARGET'] = sub_preds
sub[['SK_ID_CURR', 'TARGET']].to_csv('sub_nn.csv', index=False)
print(sub.head())
Home Credit Default Risk
1,194,140
X_train_sample, y_train_sample = select_sample(mult=50)
f1_est = []
num_estimators = 10
while num_estimators <= 100:
    rfclassifier = RandomForestClassifier(n_estimators=num_estimators, max_features=10)
    rfclassifier.fit(X_train_sample, y_train_sample)
    train_f1_score, valid_f1_score = get_f1_score(rfclassifier, X_train_sample, X_valid,
                                                  y_train_sample, y_valid)
    f1_est.append({'num_estimators': num_estimators,
                   'train_f1_score': train_f1_score,
                   'valid_f1_score': valid_f1_score})
    num_estimators += 10<load_from_disk>
cash = pd.read_csv('../input/POS_CASH_balance.csv')
bureau_bal = pd.read_csv('../input/bureau_balance.csv')
card = pd.read_csv('../input/credit_card_balance.csv')
bureau = pd.read_csv('../input/bureau.csv')
train = pd.read_csv('../input/application_train.csv')
test = pd.read_csv('../input/application_test.csv')
previous = pd.read_csv('../input/previous_application.csv')
installment = pd.read_csv('../input/installments_payments.csv')
print('Done!')
print(cash.shape, bureau_bal.shape, card.shape, bureau.shape,
      train.shape, test.shape, previous.shape, installment.shape)
Home Credit Default Risk
1,194,140
f1_est_df = pd.io.json.json_normalize(f1_est)<train_model>
na = train.isnull().sum() / len(train)
na.sort_values(ascending=False).head(5)
Home Credit Default Risk
1,194,140
X_train_sample, y_train_sample = select_sample(mult=50)
f1_feat = []
max_features = 2
while max_features <= 20:
    rfclassifier = RandomForestClassifier(n_estimators=60, max_features=max_features)
    rfclassifier.fit(X_train_sample, y_train_sample)
    train_f1_score, valid_f1_score = get_f1_score(rfclassifier, X_train_sample, X_valid,
                                                  y_train_sample, y_valid)
    f1_feat.append({'max_features': max_features,
                    'train_f1_score': train_f1_score,
                    'valid_f1_score': valid_f1_score})
    max_features += 1<load_from_disk>
train.loc[:, ['SK_ID_CURR', 'TARGET', 'AMT_INCOME_TOTAL']].sort_values('AMT_INCOME_TOTAL', ascending=False).head(10)
Home Credit Default Risk
1,194,140
f1_feat_df = pd.io.json.json_normalize(f1_feat)<train_model>
train['DAYS_EMPLOYED'].value_counts().head()
Home Credit Default Risk
1,194,140
final_classifier = RandomForestClassifier(n_estimators=60, max_features=19)
final_classifier.fit(X_train_sample, y_train_sample)<compute_test_metric>
bureau_bal.STATUS.value_counts()
Home Credit Default Risk
1,194,140
get_f1_score(final_classifier, X_train_sample, X_valid, y_train_sample, y_valid)<split>
house_train = train.loc[:, 'APARTMENTS_AVG':'EMERGENCYSTATE_MODE']
house_test = test.loc[:, 'APARTMENTS_AVG':'EMERGENCYSTATE_MODE']
house_var = house_train.columns.tolist()
house_train = pd.get_dummies(house_train)
house_test = pd.get_dummies(house_test)
house_train.fillna(0, inplace=True)
house_test.fillna(0, inplace=True)
house_train.head()
Home Credit Default Risk
1,194,140
X_test = X[m:]<predict_on_test>
pca = PCA(n_components=3)
house_train2 = pca.fit_transform(house_train)
house_test2 = pca.transform(house_test)
house_train2.shape, house_test2.shape
Home Credit Default Risk
1,194,140
y_test_preds = final_classifier.predict(X_test)<create_dataframe>
house_train2 = pd.DataFrame(house_train2, columns=['house_pc1', 'house_pc2', 'house_pc3'])
house_test2 = pd.DataFrame(house_test2, columns=['house_pc1', 'house_pc2', 'house_pc3'])  # was house_test, which wraps the raw dummies instead of the PCA output
Home Credit Default Risk
1,194,140
submission = pd.DataFrame({'id': test_og.id, 'target': y_test_preds.astype(int)})<save_to_csv>
train = pd.concat([train.drop(columns=house_var), house_train2], axis=1)
test = pd.concat([test.drop(columns=house_var), house_test2], axis=1)
train.shape, test.shape
Home Credit Default Risk
1,194,140
submission.to_csv('submission_dk_201910192153.csv', index=False)<categorify>
train['CREDIT_INCOME_PERCENT'] = train['AMT_CREDIT'] / train['AMT_INCOME_TOTAL']
train['ANNUITY_INCOME_PERCENT'] = train['AMT_ANNUITY'] / train['AMT_INCOME_TOTAL']
train['DAYS_EMPLOYED_PERCENT'] = train['DAYS_EMPLOYED'] / train['DAYS_BIRTH']
train['INCOME_PER_PERSON'] = train['AMT_INCOME_TOTAL'] / train['CNT_FAM_MEMBERS']
train['PAYMENT_RATE'] = train['AMT_ANNUITY'] / train['AMT_CREDIT']
test['CREDIT_INCOME_PERCENT'] = test['AMT_CREDIT'] / test['AMT_INCOME_TOTAL']
test['ANNUITY_INCOME_PERCENT'] = test['AMT_ANNUITY'] / test['AMT_INCOME_TOTAL']
test['DAYS_EMPLOYED_PERCENT'] = test['DAYS_EMPLOYED'] / test['DAYS_BIRTH']
test['INCOME_PER_PERSON'] = test['AMT_INCOME_TOTAL'] / test['CNT_FAM_MEMBERS']
test['PAYMENT_RATE'] = test['AMT_ANNUITY'] / test['AMT_CREDIT']
Home Credit Default Risk
1,194,140
test_sample.replace('na', np.nan, inplace=True)<data_type_conversions>
log_features = ['AMT_INCOME_TOTAL', 'AMT_CREDIT', 'AMT_ANNUITY', 'AMT_GOODS_PRICE',
                'DAYS_EMPLOYED', 'DAYS_REGISTRATION']
days_features = ['DAYS_BIRTH', 'DAYS_EMPLOYED', 'DAYS_REGISTRATION', 'DAYS_ID_PUBLISH']
train[days_features] = -train[days_features]
test[days_features] = -test[days_features]
train[log_features] = np.log1p(train[log_features])
test[log_features] = np.log1p(test[log_features])
Home Credit Default Risk
1,194,140
test_sample_nonna = test_sample.fillna(data.mean())<normalization>
train['na_col'] = train.isnull().sum(axis=1)
test['na_col'] = test.isnull().sum(axis=1)
Home Credit Default Risk
1,194,140
X_test_sample = scaler.transform(test_sample_nonna.iloc[:, 1:])<predict_on_test>
train = pd.get_dummies(train)
test = pd.get_dummies(test)
train_labels = train['TARGET']
train, test = train.align(test, join='inner', axis=1)
train['TARGET'] = train_labels
Home Credit Default Risk
1,194,140
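The align call in the row above is what reconciles dummy columns between train and test. A tiny demonstration with hypothetical frames:

import pandas as pd

train = pd.DataFrame({'a': [1], 'b': [2], 'TARGET': [0]})
test = pd.DataFrame({'a': [3], 'c': [4]})
labels = train['TARGET']
# Keep only the columns both frames share, then restore the label.
train, test = train.align(test, join='inner', axis=1)
train['TARGET'] = labels
print(list(train.columns), list(test.columns))  # ['a', 'TARGET'] ['a']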
y_test_sample_preds = final_classifier.predict(X_test_sample)<load_pretrained>
safe = bureau.loc[(bureau.CREDIT_DAY_OVERDUE == 0) & (bureau.CNT_CREDIT_PROLONG == 0),
                  ['SK_ID_CURR', 'SK_ID_BUREAU']]
safe = safe.groupby('SK_ID_CURR').count().reset_index()
safe.columns = ['SK_ID_CURR', 'past_loan_np']
prob = bureau[['SK_ID_CURR', 'CREDIT_DAY_OVERDUE', 'CNT_CREDIT_PROLONG']].groupby('SK_ID_CURR').max()
prob.columns = ['overdue_max', 'prolong_max']
prob = prob.reset_index()
loan_type = pd.get_dummies(bureau.CREDIT_TYPE)
loan_type['SK_ID_CURR'] = bureau['SK_ID_CURR']
loan_type = loan_type.groupby('SK_ID_CURR').mean().reset_index()
Home Credit Default Risk
1,194,140
data.mean().to_pickle('data_means.pkl')<load_from_csv>
_ = bureau_bal.drop(columns=['STATUS']).groupby('SK_ID_BUREAU').max().reset_index()
final_status = _.merge(bureau_bal, on=['SK_ID_BUREAU', 'MONTHS_BALANCE'], how='left')
final_status = pd.get_dummies(final_status).drop(columns='MONTHS_BALANCE')
final_status2 = bureau[['SK_ID_CURR', 'SK_ID_BUREAU']].merge(final_status, on='SK_ID_BUREAU', how='left')
final_status2.STATUS_X.fillna(1, inplace=True)
final_status2.fillna(0, inplace=True)
final_status2 = final_status2.drop(columns='SK_ID_BUREAU').groupby('SK_ID_CURR').mean().reset_index()
final_status2 = final_status2.drop(columns=['STATUS_0', 'STATUS_C'])
Home Credit Default Risk
1,194,140
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
df = test.append(train)  # DataFrame.append was removed in pandas 2.0; pd.concat([test, train]) is the modern spelling
feats = [c for c in df.columns if c not in ['codigo_mun', 'comissionados_por_servidor']]<data_type_conversions>
del bureau, bureau_bal
Home Credit Default Risk
1,194,140
import re

regx = re.compile(r'\(\d+\)')
regx2 = re.compile(r'\,?\.?')
# np.int was removed in newer numpy; plain int is equivalent here
df['populacao'] = df['populacao'].str.replace(regx2, '').str.replace(regx, '').astype(int)<data_type_conversions>
cash_dpd = cash.loc[:, ['SK_ID_CURR', 'SK_DPD_DEF']].groupby('SK_ID_CURR').agg(['max', 'median']).reset_index()
cash_dpd.columns = ['SK_ID_CURR', 'Cash_SK_DPD_DEF_max', 'Cash_SK_DPD_DEF_median']
card_dpd = card.loc[:, ['SK_ID_CURR', 'SK_DPD_DEF']].groupby('SK_ID_CURR').agg(['max', 'median']).reset_index()
card_dpd.columns = ['SK_ID_CURR', 'Card_SK_DPD_DEF_max', 'Card_SK_DPD_DEF_median']
Home Credit Default Risk
1,194,140
df['area'] = df['area'].str.replace(',', '').astype(float)<data_type_conversions>
del cash, card
Home Credit Default Risk
1,194,140
df['densidade_dem'] = df['densidade_dem'].str.replace(',', '').astype(float)
df['cat_porte'] = df['porte'].astype('category').cat.codes
df['cat_regiao'] = df['regiao'].astype('category').cat.codes
df['cat_estado'] = df['estado'].astype('category').cat.codes<define_variables>
p_good = previous.loc[(previous.NAME_CONTRACT_STATUS == 'Approved') | (previous.NAME_CONTRACT_STATUS == 'Unused offer'),
                      ['SK_ID_CURR', 'AMT_CREDIT']]
p_bad = previous.loc[(previous.NAME_CONTRACT_STATUS == 'Canceled') | (previous.NAME_CONTRACT_STATUS == 'Refused'),
                     ['SK_ID_CURR', 'AMT_CREDIT']]
p_good = p_good.dropna()
p_good2 = p_good.groupby('SK_ID_CURR').sum().reset_index()
p_good2.columns = ['SK_ID_CURR', 'good_credit']
p_bad2 = p_bad.groupby('SK_ID_CURR').sum().reset_index()
p_bad2.columns = ['SK_ID_CURR', 'bad_credit']
Home Credit Default Risk
1,194,140
feats = [c for c in df.columns if c not in ['codigo_mun', 'comissionados_por_servidor', 'nota_mat',
                                            'densidade_dem', 'participacao_transf_receita', 'servidores',
                                            'perc_pop_econ_ativa', 'gasto_pc_saude', 'hab_p_medico',
                                            'exp_anos_estudo', 'regiao', 'estado', 'porte', 'municipio']]<split>
installment['diff'] = installment.AMT_PAYMENT - installment.AMT_INSTALMENT
ins_comp = installment.dropna(subset=['AMT_PAYMENT']).loc[:, ['SK_ID_CURR', 'AMT_INSTALMENT', 'AMT_PAYMENT', 'diff']].groupby('SK_ID_CURR').sum().reset_index()
ins_comp['repay'] = ins_comp.AMT_PAYMENT / ins_comp.AMT_INSTALMENT
ins_comp = ins_comp.loc[ins_comp.AMT_INSTALMENT != 0, :]
Home Credit Default Risk
1,194,140
train = df[~df.nota_mat.isnull()]
test_submissao = df[df.nota_mat.isnull()]<split>
del previous, installment
Home Credit Default Risk
1,194,140
train_2, test = train_test_split(train, test_size=0.20, random_state=42)
train_2, valid = train_test_split(train_2, test_size=0.20, random_state=42)
rf = RandomForestClassifier(random_state=42, n_estimators=300, min_samples_split=5, max_depth=4)<predict_on_test>
def merge_data(train, test, dfs):
    for df in dfs:
        train = train.merge(df, on='SK_ID_CURR', how='left')
        test = test.merge(df, on='SK_ID_CURR', how='left')
    return train, test

train, test = merge_data(train, test, [safe, prob, loan_type, final_status2, cash_dpd,
                                       card_dpd, p_good2, p_bad2, ins_comp])
Home Credit Default Risk
1,194,140
rf.fit(train_2[feats], train_2['nota_mat'])
preds = rf.predict(valid[feats])<save_to_csv>
def rm_collinear(train, test):
    threshold = 0.9
    corr_matrix = train.corr().abs()
    # np.bool was removed in newer numpy; plain bool is equivalent
    upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
    to_drop = [column for column in upper.columns
               if any(upper[column] > threshold) and column != 'SK_ID_CURR']
    train = train.drop(columns=to_drop)
    test = test.drop(columns=to_drop)
    return train, test, to_drop

train, test, collin_feat = rm_collinear(train, test)
Home Credit Default Risk
1,194,140
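A toy run of the upper-triangle correlation filter defined above (np.triu masks the lower triangle so each pair is tested once; x2 is constructed to be perfectly correlated with x1):

import numpy as np
import pandas as pd

df = pd.DataFrame({'x1': [1, 2, 3, 4], 'x2': [2, 4, 6, 8], 'x3': [4, 1, 3, 2]})
corr = df.corr().abs()
# Keep only the upper triangle so each correlated pair is seen once.
upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
to_drop = [c for c in upper.columns if (upper[c] > 0.9).any()]
print(to_drop)  # ['x2'] -- perfectly correlated with x1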
test_submissao['nota_mat'] = rf.predict(test_submissao[feats])
test_submissao[['codigo_mun', 'nota_mat']].to_csv('respostas.csv', index=False)<compute_test_metric>
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score, accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.calibration import CalibratedClassifierCV
from lightgbm import LGBMClassifier
from time import time
Home Credit Default Risk
1,194,140
accuracy_score(valid['nota_mat'], preds)<compute_test_metric>
x_train = train.drop(columns=['TARGET', 'SK_ID_CURR'])
x_test = test.drop(columns=['SK_ID_CURR'])
features = list(x_train.columns)
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
Home Credit Default Risk
1,194,140
accuracy_score(test['nota_mat'], rf.predict(test[feats]))<compute_train_metric>
gb = GradientBoostingClassifier(n_estimators=30, learning_rate=0.1, min_samples_split=4, random_state=10, verbose=1)
ab = AdaBoostClassifier(n_estimators=300, learning_rate=1, random_state=123)
rf0 = RandomForestClassifier(n_estimators=49, max_depth=5, random_state=100)
lr = LogisticRegression(C=1, random_state=100)
nn = MLPClassifier(hidden_layer_sizes=(32,), alpha=0.01, learning_rate_init=0.01, max_iter=100, batch_size=4082)
dt = DecisionTreeClassifier(max_features=100)
lgb = LGBMClassifier(n_estimators=2000, learning_rate=0.02, objective='binary', reg_lambda=0.1, num_leaves=34)
Home Credit Default Risk
1,194,140
rf = RandomForestRegressor(random_state=42, n_estimators=100)
rf.fit(train_2[feats], train_2['nota_mat'])
preds = rf.predict(valid[feats])
mean_squared_error(valid['nota_mat'], preds) ** (1 / 2)<train_model>
x_train_x, x_train_v, y_train_x, y_train_v = train_test_split(x_train, train_labels, test_size=0.1, random_state=168)

def run_model_try(ml):
    start = time()
    model = ml
    model.fit(x_train_x, y_train_x)
    train_pred = model.predict_proba(x_train_x)[:, 1]
    v_pred = model.predict_proba(x_train_v)[:, 1]
    train_auc = roc_auc_score(y_train_x, train_pred)
    v_auc = roc_auc_score(y_train_v, v_pred)
    train_acc = accuracy_score(y_train_x, model.predict(x_train_x))
    v_acc = accuracy_score(y_train_v, model.predict(x_train_v))
    end = time()
    print("Training and validation auc: %.4f, %.4f" % (train_auc, v_auc))
    print("Training and validation accuracy: %.4f, %.4f" % (train_acc, v_acc))
    print("Time used: %.2f" % (end - start))
    return model

def run_model_real(ml):
    model = ml
    model.fit(x_train, train_labels)
    return model
Home Credit Default Risk
1,194,140
dt = DecisionTreeRegressor(random_state=44)
dt.fit(train_2[feats], train_2['nota_mat'])
dt.predict(train_2[feats])
data = tree.export_graphviz(dt, out_file=None, feature_names=feats, class_names=['nota_mat'],
                            filled=True, rounded=True, special_characters=True, max_depth=2)<load_from_csv>
model = run_model_real(lgb)
test_pred = model.predict_proba(x_test)[:, 1]
Home Credit Default Risk
1,194,140
<load_from_csv><EOS>
submit = test[['SK_ID_CURR']]
submit['TARGET'] = test_pred
submit.to_csv('lgb0825.csv', index=False)
Home Credit Default Risk
1,457,238
<SOS> metric: AUC Kaggle data source: home-credit-default-risk<prepare_x_and_y>
warnings.filterwarnings('ignore')
Home Credit Default Risk
1,457,238
y=X_set.Revenue<correct_missing_values>
app_train = pd.read_csv('../input/application_train.csv')
print('training data shape: ', app_train.shape)
app_train.head()
Home Credit Default Risk
1,457,238
X_set.dropna(axis=0, subset=['Revenue'], inplace=True)<drop_column>
app_test = pd.read_csv('../input/application_test.csv')
print('testing data shape: ', app_test.shape)
app_test.head()
Home Credit Default Risk
1,457,238
X_set.drop(['Revenue'], axis=1, inplace=True)<split>
dataset.isnull().sum()
Home Credit Default Risk
1,457,238
X_train, X_valid, y_train, y_valid = train_test_split(X_set, y, test_size=0.2, random_state=0)<define_variables>
dataset.dtypes.value_counts()
Home Credit Default Risk
1,457,238
s = (X_train.dtypes == 'object')<define_variables>
dataset.select_dtypes('object').apply(pd.Series.nunique, axis=0)
Home Credit Default Risk
1,457,238
object_cols = list(s[s].index)<define_variables>
le = LabelEncoder()
count = 0
le_vars = []
for col in dataset:
    if dataset[col].dtype == 'object':
        if len(list(dataset[col].unique())) == 2:
            le.fit(dataset[col])
            dataset[col] = le.transform(dataset[col])
            count += 1
            le_vars.append(col)
print('%d columns were label encoded' % count)
print(le_vars)
Home Credit Default Risk
1,457,238
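The loop above label-encodes only 2-level object columns, leaving wider categoricals for get_dummies. A self-contained sketch with hypothetical columns:

import pandas as pd
from sklearn.preprocessing import LabelEncoder

df = pd.DataFrame({'FLAG_OWN_CAR': ['Y', 'N', 'Y'],
                   'NAME_TYPE_SUITE': ['Family', 'Alone', 'Other']})
le = LabelEncoder()
for col in df.columns:
    # Only 2-level object columns get label encoding; wider ones are
    # left for get_dummies, matching the loop above.
    if df[col].dtype == 'object' and df[col].nunique() == 2:
        df[col] = le.fit_transform(df[col])
print(df.dtypes)  # FLAG_OWN_CAR is now integer; NAME_TYPE_SUITE stays object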
good_label_cols = [col for col in object_cols if set(X_train[col]) == set(X_valid[col])]<categorify>
dataset = pd.get_dummies(dataset)
print('dataset data shape: ', dataset.shape)
Home Credit Default Risk
1,457,238
label_encoder = LabelEncoder()<categorify>
dataset['CNT_CHILDREN_outlier'] = dataset['CNT_CHILDREN'] > 6
for i in dataset['CNT_CHILDREN']:
    if i > 6:
        dataset['CNT_CHILDREN'].replace({i: np.nan}, inplace=True)
Home Credit Default Risk
1,457,238
for col in set(good_label_cols):
    X_train[col] = label_encoder.fit_transform(X_train[col])
    X_valid[col] = label_encoder.transform(X_valid[col])
    X_test[col] = label_encoder.transform(X_test[col])<choose_model_class>
dataset['DAYS_EMPLOYED_outlier'] = dataset['DAYS_EMPLOYED'] == 365243
dataset['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace=True)
Home Credit Default Risk
1,457,238
myimputer = SimpleImputer()<create_dataframe>
dataset['OWN_CAR_AGE_outlier'] = dataset['OWN_CAR_AGE'] > 60
for i in dataset['OWN_CAR_AGE']:
    if i > 60:
        dataset['OWN_CAR_AGE'].replace({i: np.nan}, inplace=True)
Home Credit Default Risk
1,457,238
imX_train = pd.DataFrame(myimputer.fit_transform(X_train))
imX_valid = pd.DataFrame(myimputer.transform(X_valid))<choose_model_class>
print(np.nanpercentile(dataset['CNT_FAM_MEMBERS'], 99))
Home Credit Default Risk
1,457,238
model = RandomForestClassifier(n_estimators=100, random_state=2)<prepare_output>
dataset['CNT_FAM_MEMBERS_outlier'] = dataset['CNT_FAM_MEMBERS'] > 5
for i in dataset['CNT_FAM_MEMBERS']:
    if i > 5:
        dataset['CNT_FAM_MEMBERS'].replace({i: np.nan}, inplace=True)
Home Credit Default Risk
1,457,238
X_train = imX_train
X_valid = imX_valid<train_model>
for i in dataset['REGION_RATING_CLIENT_W_CITY']:
    if i == -1:
        dataset['REGION_RATING_CLIENT_W_CITY'].replace({i: 1}, inplace=True)
Home Credit Default Risk
1,457,238
model.fit(X_train, y_train)<predict_on_test>
dataset['OBS_30_CNT_SOCIAL_CIRCLE_outlier'] = dataset['OBS_30_CNT_SOCIAL_CIRCLE'] > 17
for i in dataset['OBS_30_CNT_SOCIAL_CIRCLE']:
    if i > 17:
        dataset['OBS_30_CNT_SOCIAL_CIRCLE'].replace({i: np.nan}, inplace=True)
Home Credit Default Risk
1,457,238
pred = model.predict(X_valid)<compute_test_metric>
dataset['DEF_30_CNT_SOCIAL_CIRCLE_outlier'] = dataset['DEF_30_CNT_SOCIAL_CIRCLE'] > 5
for i in dataset['DEF_30_CNT_SOCIAL_CIRCLE']:
    if i > 5:
        dataset['DEF_30_CNT_SOCIAL_CIRCLE'].replace({i: np.nan}, inplace=True)
Home Credit Default Risk
1,457,238
error = accuracy_score(pred, y_valid)<compute_test_metric>
dataset['OBS_60_CNT_SOCIAL_CIRCLE_outlier'] = dataset['OBS_60_CNT_SOCIAL_CIRCLE'] > 16
for i in dataset['OBS_60_CNT_SOCIAL_CIRCLE']:
    if i > 16:
        dataset['OBS_60_CNT_SOCIAL_CIRCLE'].replace({i: np.nan}, inplace=True)
Home Credit Default Risk
1,457,238
print(confusion_matrix(pred, y_valid))<count_missing_values>
dataset['DEF_60_CNT_SOCIAL_CIRCLE_outlier'] = dataset['DEF_60_CNT_SOCIAL_CIRCLE'] > 4
for i in dataset['DEF_60_CNT_SOCIAL_CIRCLE']:
    if i > 4:
        dataset['DEF_60_CNT_SOCIAL_CIRCLE'].replace({i: np.nan}, inplace=True)
Home Credit Default Risk