Dataset columns (recovered from the viewer header):
- kernel_id: int64, values ranging 24.2k to 23.3M
- prompt: string, lengths 8 to 1.85M characters
- completetion: string, lengths 1 to 182k characters
- comp_name: string, lengths 5 to 57 characters
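A minimal sketch of reading rows with this schema, assuming the dump has been exported to a CSV named kernels.csv (hypothetical filename; the actual storage format is not stated here):

import pandas as pd

# Hypothetical export of the table above.
df = pd.read_csv("kernels.csv")

# Each row appears to pair a notebook's preceding cells (prompt, ending in a
# cell-type tag such as <choose_model_class>) with the next cell
# ("completetion", the column name exactly as it appears in the dump).
row = df.iloc[0]
print(row["kernel_id"], row["comp_name"])
print(row["prompt"][:200])        # truncated preview
print(row["completetion"][:200])  # truncated preview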
4,160,759
class LungSliceModelGenerator(kutils.Sequence):
    'Generates data for Keras'

    def __init__(self, mapping_df, batch_size, shuffle=True):
        'Initialization'
        self.mapping_df = mapping_df
        self.data_num = mapping_df.shape[0]
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(self.data_num / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        batch_mapping_df = \
            self.mapping_df.iloc[index * self.batch_size:(index + 1) * self.batch_size]
        X, y = self.__data_generation(batch_mapping_df)
        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        if self.shuffle:
            self.mapping_df = self.mapping_df.sample(frac=1).reset_index(drop=True)

    def __data_generation(self, batch_mapping_df):
        'Generates data containing batch_size samples'
        X = np.zeros((self.batch_size, 512, 512, 1))
        y = np.zeros((self.batch_size, 512, 512, 1))
        cnt = 0
        for i, row in batch_mapping_df.iterrows():
            X[cnt, :, :, 0] = np.load(row['image'])['image']
            y[cnt, :, :, 0] = np.load(row['label'])['label']
            cnt += 1
        return X, y
<choose_model_class>
df = pd.read_csv(".. /input/dfcsv/df.csv" )
Home Credit Default Risk
4,160,759
batch_size = 16
slice_generator = LungSliceModelGenerator(map_df, batch_size=batch_size)
<compute_test_metric>
df_model = df[df['TARGET'].notnull()]
feats = [f for f in df_model.columns
         if f not in ['TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index']]
train_x, test_x, train_y, test_y = train_test_split(
    df_model[feats], df_model['TARGET'], random_state=42)
df_submission = df.loc[df['TARGET'].isnull(), feats]
main_id_submission = df.loc[df['TARGET'].isnull(), 'SK_ID_CURR']
del df
Home Credit Default Risk
4,160,759
def _dice_coefficient(threshold=0.3):
    def hard_dice_coefficient(y_true, y_pred, smooth=1.0):
        y_true_f = K.flatten(K.cast(y_true > threshold, dtype=float))
        y_pred_f = K.flatten(K.cast(y_pred > threshold, dtype=float))
        intersection = K.sum(y_true_f * y_pred_f)
        return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return hard_dice_coefficient

def dice_coefficient_loss(y_true, y_pred):
    return 1 - _dice_coefficient()(y_true, y_pred)
<choose_model_class>
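For reference, the hard Dice score implemented above, with binarized masks A and B and smoothing term s (smooth=1.0), is

$$\mathrm{Dice}(A, B) = \frac{2\,|A \cap B| + s}{|A| + |B| + s}$$

and dice_coefficient_loss is 1 - Dice, so perfect overlap gives a loss of 0.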
DEBUG = False
Home Credit Default Risk
4,160,759
def unet(pretrained_weights=None, input_size=[512, 512, 1], depth=3,
         init_filter=8, filter_size=3, padding='same',
         pool_size=[2, 2], strides=[2, 2]):
    inputs = klayers.Input(input_size)
    current_layer = inputs
    encoding_layers = []
    # Encoder: two conv-BN-relu blocks per level, then max-pool
    for d in range(depth + 1):
        num_filters = init_filter * 2 ** d
        conv = klayers.Conv2D(num_filters, filter_size, padding=padding,
                              kernel_initializer='he_normal')(current_layer)
        conv = klayers.BatchNormalization()(conv)
        conv = klayers.Activation('relu')(conv)
        conv = klayers.Conv2D(num_filters * 2, filter_size, padding=padding,
                              kernel_initializer='he_normal')(conv)
        conv = klayers.BatchNormalization()(conv)
        conv = klayers.Activation('relu')(conv)
        encoding_layers.append(conv)
        pool = klayers.MaxPooling2D(pool_size=pool_size)(conv)
        if d == depth:
            current_layer = conv
        else:
            current_layer = pool
    # Decoder: deconvolution, crop-and-concatenate skip connection, two conv blocks
    for d in range(depth, 0, -1):
        num_filters = init_filter * 2 ** d
        up = klayers.Deconvolution2D(num_filters * 2, pool_size,
                                     strides=strides)(current_layer)
        crop_layer = encoding_layers[d - 1]
        up_shape = np.array(up._keras_shape[1:-1])
        conv_shape = np.array(crop_layer._keras_shape[1:-1])
        crop_left = (conv_shape - up_shape) // 2
        crop_right = (conv_shape - up_shape) // 2 + (conv_shape - up_shape) % 2
        crop_sizes = tuple(zip(crop_left, crop_right))
        crop = klayers.Cropping2D(cropping=crop_sizes)(crop_layer)
        up = klayers.Concatenate(axis=-1)([crop, up])
        conv = klayers.Conv2D(num_filters, filter_size, padding=padding,
                              kernel_initializer='he_normal')(up)
        conv = klayers.BatchNormalization()(conv)
        conv = klayers.Activation('relu')(conv)
        conv = klayers.Conv2D(num_filters, filter_size, padding=padding,
                              kernel_initializer='he_normal')(conv)
        conv = klayers.BatchNormalization()(conv)
        conv = klayers.Activation('relu')(conv)
        current_layer = conv
    outputs = klayers.Conv2D(1, 1, padding=padding,
                             kernel_initializer='he_normal')(current_layer)
    outputs = klayers.Activation('sigmoid')(outputs)
    model = Model(inputs=inputs, outputs=outputs)
    if pretrained_weights:
        model.load_weights(pretrained_weights)
    return model
<choose_model_class>
ITER = 1
SCORES = []
MINUTES = time.time()
if DEBUG == True:
    init_pt = 1
    n_iter_pt = 2
    PT_GRAPH = 3
else:
    init_pt = 10
    n_iter_pt = 100
    PT_GRAPH = 10

def lgb_evaluate(numLeaves, maxDepth, minChildWeight, subsample,
                 colsample_bytree, learn_rate, reg_alpha, reg_lambda,
                 min_split_gain):
    global ITER, SCORES, MINUTES
    clf = LGBMClassifier(
        nthread=4,
        n_estimators=100,
        verbose=-1,
        silent=-1,
        num_leaves=int(numLeaves),
        max_depth=int(maxDepth),
        min_child_weight=minChildWeight,
        colsample_bytree=colsample_bytree,
        subsample=subsample,
        learning_rate=learn_rate,
        reg_alpha=reg_alpha,
        reg_lambda=reg_lambda,
        min_split_gain=min_split_gain)
    scores = cross_val_score(clf, train_x, train_y, cv=5, scoring='roc_auc')
    print("Mean cross validation score: {}".format(np.mean(scores)))
    SCORES.append(np.mean(scores))
    if ITER % PT_GRAPH == 0:
        plt.figure(figsize=(11, 4))
        plt.plot(range(len(SCORES)), SCORES)
        plt.scatter(SCORES.index(max(SCORES)), max(SCORES), color='red')
        plt.ylabel("Score")
        plt.xlabel("Attempt")
        plt.title("Real time evolution of the mean score")
        plt.show()
        print("Minutes since beginning: {}".format(float(time.time() - MINUTES) / 60))
    ITER = ITER + 1
    return np.mean(scores)

lgbBO = BayesianOptimization(lgb_evaluate, {
    'numLeaves': (5, 50),
    'maxDepth': (2, 63),
    'minChildWeight': (0.01, 70),
    'subsample': (0.4, 1),
    'colsample_bytree': (0.4, 1),
    'learn_rate': (0.1, 1),
    'reg_alpha': (0, 1),
    'reg_lambda': (0, 1),
    'min_split_gain': (0, 1)})
lgbBO.maximize(init_points=init_pt, n_iter=n_iter_pt)
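The optimizer's history is consumed a few cells below via lgbBO.res, a list of dicts with 'target' (the mean AUC returned above) and 'params' keys; assuming that layout, an equivalent one-liner for the manual argmax done there:

best_run = max(lgbBO.res, key=lambda r: r['target'])
best_params = best_run['params']  # same values the later cells pull out by index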
Home Credit Default Risk
4,160,759
model = unet(depth=3)
model.compile(optimizer=Adam(lr=1e-3), loss='binary_crossentropy',
              metrics=[_dice_coefficient(0.5)])
model.summary()
<train_model>
best = max([lgbBO.res[i]['target'] for i in range(len(lgbBO.res)) ]) best
Home Credit Default Risk
4,160,759
model_folder = os.path.join('./model', 'sample-code')
if not os.path.exists(model_folder):
    os.makedirs(model_folder)
callbacks = []
callbacks.append(ModelCheckpoint(os.path.join(model_folder, 'model-{epoch:03d}.h5'),
                                 save_best_only=False, period=5))
<train_model>
best_index = [lgbBO.res[i]['target'] for i in range(len(lgbBO.res)) ].index(best) best_index
Home Credit Default Risk
4,160,759
history = model.fit_generator(slice_generator, epochs=15, verbose=1, callbacks=callbacks )<predict_on_test>
param_dict = lgbBO.res[best_index]["params"]
clf = LGBMClassifier(
    nthread=4,
    n_estimators=100,
    silent=-1,
    verbose=-1,
    num_leaves=34,
    colsample_bytree=param_dict["colsample_bytree"],
    subsample=param_dict["subsample"],
    max_depth=int(param_dict["maxDepth"]),
    min_child_weight=param_dict["minChildWeight"],
    learning_rate=param_dict["learn_rate"],
    reg_alpha=param_dict["reg_alpha"],
    reg_lambda=param_dict["reg_lambda"],
    min_split_gain=param_dict["min_split_gain"])
clf.fit(train_x, train_y)
Home Credit Default Risk
4,160,759
def retrieve_pred_str(src_dir, model, threshold=0.4):
    encode_name = src_dir.split('/')[-1]
    _, test_volume = load_dicom_volume(src_dir, suffix='*.dcm')
    pred_label = model.predict(np.expand_dims(test_volume, axis=-1))
    pred_label = np.transpose(pred_label[:, :, :, 0], axes=(2, 1, 0))
    pred_label = (pred_label > threshold).astype(int)
    label_flatten = pred_label.flatten()
    label_flatten_idx = np.where(label_flatten == 1)[0]
    label_str = ''
    if label_flatten_idx.size > 0:
        prev_idx = label_flatten_idx[0]
        idx_start = label_flatten_idx[0]
        cnt = 1
        for _idx in label_flatten_idx[1:]:
            if _idx == prev_idx + 1:
                cnt += 1
            else:
                label_str += str(idx_start) + ' ' + str(cnt) + ' '
                cnt = 1
                idx_start = _idx
            prev_idx = _idx
        # Append the final run, which the loop itself never writes
        label_str += str(idx_start) + ' ' + str(cnt)
    label_str = label_str.rstrip(' ')
    return (encode_name, label_str)
<load_from_csv>
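The inner loop above emits run-length pairs (start index, run length) over the flattened mask; a tiny standalone check of that encoding, with a made-up mask:

import numpy as np

flat = np.array([0, 1, 1, 0, 1])   # made-up flattened binary mask
idx = np.where(flat == 1)[0]       # array([1, 2, 4])
# Consecutive runs: start 1 length 2, then start 4 length 1,
# so retrieve_pred_str would produce the string '1 2 4 1'.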
print(metrics.auc(fpr, tpr))
Home Credit Default Risk
4,160,759
sample_submission = np.genfromtxt('../input/sample_submission.csv', delimiter=',',
                                  dtype='str', skip_header=1)
<define_variables>
importance_df = pd.DataFrame() importance_df["feature"] = feats importance_df["importance"] = clf.feature_importances_ importance_df = importance_df.sort_values(by='importance', ascending=False) importance_df = importance_df.reset_index(drop=True )
Home Credit Default Risk
4,160,759
test_encode_list = sample_submission[:, 0]<categorify>
display_importances(importance_df )
Home Credit Default Risk
4,160,759
pred_pair_list = []
for encode_name in tqdm.tqdm(test_encode_list, total=len(test_encode_list)):
    (encode, label_str) = retrieve_pred_str(
        os.path.join(test_image_folder, encode_name), model, threshold=0.4)
    pred_pair_list.append((encode, label_str))
<save_to_csv>
best_feature = importance_df.loc[0:30, "feature"].values
Home Credit Default Risk
4,160,759
solution_path = './sample-code_pred.csv'
with open(solution_path, 'w') as f:
    f.write('encode,pixel_value\n')
    for _pair in pred_pair_list:
        encode = _pair[0]
        label_str = _pair[1]
        f.write(encode + ',' + label_str + '\n')
<import_modules>
values_x = pd.concat([train_x, test_x]) values_y = pd.concat([train_y, test_y] )
Home Credit Default Risk
4,160,759
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import keras
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from keras.models import Model
from keras.layers import LSTM, Activation, Dense, Dropout, Input, Embedding
from keras.optimizers import RMSprop
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.callbacks import EarlyStopping
<import_modules>
param_dict = lgbBO.res[best_index]["params"]
clf = LGBMClassifier(
    nthread=4,
    n_estimators=100,
    silent=-1,
    verbose=-1,
    num_leaves=34,
    colsample_bytree=param_dict["colsample_bytree"],
    subsample=param_dict["subsample"],
    max_depth=int(param_dict["maxDepth"]),
    min_child_weight=param_dict["minChildWeight"],
    learning_rate=param_dict["learn_rate"],
    reg_alpha=param_dict["reg_alpha"],
    reg_lambda=param_dict["reg_lambda"],
    min_split_gain=param_dict["min_split_gain"])
clf.fit(values_x, values_y)
Home Credit Default Risk
4,160,759
import json
from pandas.io.json import json_normalize
<load_from_csv>
import pickle

filename = 'clf.sav'
pickle.dump(clf, open(filename, 'wb'))
Home Credit Default Risk
4,160,759
raw_data = pd.read_json("../input/datamininglab2/tweets_DM.json", lines=True)
tweets = json_normalize(data=raw_data['_source'])
identify = pd.read_csv("../input/datamininglab2/data_identification.csv")
emotion = pd.read_csv("../input/datamininglab2/emotion.csv")
<merge>
y_pred_proba = clf.predict_proba(df_submission)[:, 1] df_results = pd.DataFrame(columns =['SK_ID_CURR', 'TARGET']) df_results['SK_ID_CURR'] = main_id_submission df_results['TARGET'] = y_pred_proba
Home Credit Default Risk
4,160,759
<load_pretrained><EOS>
df_results.to_csv("submission.csv", index=False )
Home Credit Default Risk
1,511,034
<SOS> metric: AUC Kaggle data source: home-credit-default-risk<load_pretrained>
pd.options.display.max_columns = 999 warnings.filterwarnings('ignore') os.environ['OMP_NUM_THREADS'] = '4'
Home Credit Default Risk
1,511,034
train_df = pd.read_pickle("../input/dm-competition-tweets-emotion/train_df.pkl")
test_df = pd.read_pickle("../input/dm-competition-tweets-emotion/test_df.pkl")
<feature_engineering>
train = pd.read_csv("../input/application_train.csv")
test = pd.read_csv("../input/application_test.csv")
previous = pd.read_csv("../input/previous_application.csv")
bureau = pd.read_csv("../input/bureau.csv")
Home Credit Default Risk
1,511,034
tknzr = TweetTokenizer()<string_transform>
previous['AMT_APPLICATION'].replace(0,np.nan, inplace = True) previous['AMT_CREDIT'].replace(0,np.nan, inplace = True) previous['AMT_GOODS_PRICE'].replace(0,np.nan,inplace =True) previous['RATE_DOWN_PAYMENT'].replace(0, np.nan, inplace = True) previous['AMT_ANNUITY'].replace(0, np.nan, inplace = True) previous['CNT_PAYMENT'].replace(0, np.nan, inplace = True )
Home Credit Default Risk
1,511,034
tknzr = TweetTokenizer(strip_handles=True, reduce_len=True) s1 = '@remy: This is waaaaayyyy too much for you!!!!!!' tknzr.tokenize(s1 )<train_model>
for i in ['Revolving loans','Cash loans', 'Consumer loans']: tmp = previous[(previous['NAME_CONTRACT_TYPE'] == i)&(previous['DAYS_LAST_DUE'] == 365243)] for df in [train,test]: tmp1 = tmp.groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index() tmp1.columns = ['SK_ID_CURR','des'] tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp1, on=['SK_ID_CURR'], how='left') df['count_notfinish_' + "_".join(i.lower().split())] = tmp_merge['des'].fillna(0 )
Home Credit Default Risk
1,511,034
tknzr = TweetTokenizer(preserve_case=False) tfidf = TfidfVectorizer(max_features=20000, stop_words='english', tokenizer=tknzr.tokenize) tfidf.fit(train_df['text'] )<categorify>
for i in ['Revolving loans','Cash loans', 'Consumer loans']: tmp = previous[(previous['NAME_CONTRACT_TYPE'] == i)&(previous['DAYS_LAST_DUE'] == 365243)] for df in [train,test]: tmp1 = tmp.groupby(['SK_ID_CURR'])['AMT_CREDIT'].agg({"returns": [np.mean, np.sum]})\ .reset_index() tmp1.columns = ['SK_ID_CURR','des1','des2'] tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp1, on=['SK_ID_CURR'], how='left') df['mean_notfinish_' + "_".join(i.lower().split())] = tmp_merge['des1'].fillna(0) df['sum_notfinish_' + "_".join(i.lower().split())] = tmp_merge['des2'].fillna(0 )
Home Credit Default Risk
1,511,034
X_train = tfidf.transform(train_df['text']) X_train.shape<categorify>
for i in ['Revolving loans','Cash loans', 'Consumer loans']: tmp = previous[(previous['NAME_CONTRACT_TYPE'] == i)&(previous['DAYS_TERMINATION'] == 365243)] for df in [train,test]: tmp1 = tmp.groupby(['SK_ID_CURR'])['AMT_ANNUITY'].agg({"returns": [np.mean, np.sum]})\ .reset_index() tmp1.columns = ['SK_ID_CURR','des1','des2'] tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp1, on=['SK_ID_CURR'], how='left') df['mean_annuity_notfinish_' + "_".join(i.lower().split())] = tmp_merge['des1'].fillna(0) df['sum_annuity_notfinish_' + "_".join(i.lower().split())] = tmp_merge['des2'].fillna(0 )
Home Credit Default Risk
1,511,034
X_test = tfidf.transform(test_df['text']) X_test.shape<prepare_x_and_y>
previous['SELLERPLACE_AREA'].replace(0, np.nan, inplace = True) previous['SELLERPLACE_AREA'].replace(-1, np.nan, inplace = True) previous['DAYS_TERMINATION'].replace(365243, np.nan, inplace = True) previous['DAYS_LAST_DUE'].replace(365243, np.nan, inplace = True) previous['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace = True) previous['sooner'] =(previous['DAYS_LAST_DUE_1ST_VERSION'] - previous['DAYS_LAST_DUE'])/(previous['DAYS_LAST_DUE_1ST_VERSION']-previous['DAYS_DECISION']) previous['duration'] = previous['DAYS_TERMINATION'] - previous['DAYS_DECISION'] previous['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace = True )
Home Credit Default Risk
1,511,034
y_train = train_df['emotion'] y_test = test_df['emotion']<load_from_csv>
train['DAYS_EMPLOYED'] = train['DAYS_EMPLOYED'].replace(365243, np.nan)
test['DAYS_EMPLOYED'] = test['DAYS_EMPLOYED'].replace(365243, np.nan)
tmp = train[train['DAYS_LAST_PHONE_CHANGE'] >= 0].index
train['DAYS_LAST_PHONE_CHANGE'].iloc[tmp] = np.nan
tmp = test[test['DAYS_LAST_PHONE_CHANGE'] >= 0].index
test['DAYS_LAST_PHONE_CHANGE'].iloc[tmp] = np.nan
for df in [train, test]:
    df['ORGANIZATION_TYPE_v2'] = df['ORGANIZATION_TYPE']
    for i in range(1, 4):
        df['ORGANIZATION_TYPE_v2'].replace('Business Entity Type ' + str(i), 'Business', inplace=True)
    for i in range(1, 14):
        df['ORGANIZATION_TYPE_v2'].replace('Industry: type ' + str(i), 'Industry', inplace=True)
    for i in range(1, 8):
        df['ORGANIZATION_TYPE_v2'].replace('Trade: type ' + str(i), 'Trade', inplace=True)
    for i in range(1, 8):
        df['ORGANIZATION_TYPE_v2'].replace('Transport: type ' + str(i), 'Transport', inplace=True)
    df['ORGANIZATION_TYPE_v2'].replace('Other', 'XNA', inplace=True)
Home Credit Default Risk
1,511,034
model_compare = pd.read_csv("../input/dm-competition-tweets-emotion/final.csv")
model_compare
<train_model>
tmp = previous[(previous['NAME_CONTRACT_STATUS'] == 'Approved')&(previous['NAME_CONTRACT_TYPE'].isin(['Consumer loans','Cash loans', 'Revolving loans'])) ].groupby(['SK_ID_CURR'])['AMT_CREDIT']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_amt_credit_master'] = tmp_merge['des1'] df['max_amt_credit_master'] = tmp_merge['des2'] df['mean_amt_credit_master'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
lr = LogisticRegression(C=6,n_jobs=-1,max_iter=1000) lr.fit(X_train,y_train )<predict_on_test>
tmp = previous[(previous['NAME_CONTRACT_STATUS'] == 'Approved')&(previous['NAME_CONTRACT_TYPE'].isin(['Cash loans'])) ].groupby(['SK_ID_CURR'])['AMT_APPLICATION']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_amt_app'] = tmp_merge['des1'] df['max_amt_app'] = tmp_merge['des2'] df['mean_amt_app'] = tmp_merge['des3'] tmp = previous[(previous['NAME_CONTRACT_STATUS'] == 'Approved')&(previous['NAME_CONTRACT_TYPE'].isin(['Consumer loans'])) ].groupby(['SK_ID_CURR'])['AMT_APPLICATION']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_amt_app_v1'] = tmp_merge['des1'] df['max_amt_app_v1'] = tmp_merge['des2'] df['mean_amt_app_v1'] = tmp_merge['des3'] tmp = previous[(previous['NAME_CONTRACT_STATUS'] == 'Approved')&(previous['NAME_CONTRACT_TYPE'].isin(['Revolving loans'])) ].groupby(['SK_ID_CURR'])['AMT_CREDIT']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_amt_card'] = tmp_merge['des1'] df['max_amt_card'] = tmp_merge['des2'] df['mean_amt_card'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
pred_result_lr = lr.predict(X_test) pred_result_lr.shape<save_to_csv>
tmp = previous[(previous['NAME_CONTRACT_STATUS'].isin(['Refused','Canceled'])) &(previous['NAME_CONTRACT_TYPE'].isin(['Cash loans'])) ].groupby(['SK_ID_CURR'])['AMT_APPLICATION']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_amt_app_fail'] = tmp_merge['des1'] df['max_amt_app_fail'] = tmp_merge['des2'] df['mean_amt_app_fail'] = tmp_merge['des3'] tmp = previous[(previous['NAME_CONTRACT_STATUS'].isin(['Refused','Canceled'])) &(previous['NAME_CONTRACT_TYPE'].isin(['Consumer loans'])) ].groupby(['SK_ID_CURR'])['AMT_APPLICATION']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_amt_app_v1_fail'] = tmp_merge['des1'] df['max_amt_app_v1_fail'] = tmp_merge['des2'] df['mean_amt_app_v1_fail'] = tmp_merge['des3'] tmp = previous[(previous['NAME_CONTRACT_STATUS'].isin(['Refused','Canceled'])) &(previous['NAME_CONTRACT_TYPE'].isin(['Revolving loans'])) ].groupby(['SK_ID_CURR'])['AMT_CREDIT']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_amt_card_fail'] = tmp_merge['des1'] df['max_amt_card_fail'] = tmp_merge['des2'] df['mean_amt_card_fail'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
test_df['emotion']=pred_result_lr test_df.drop(columns=['hashtags','text'],inplace=True) test_df.index.rename('id',inplace=True) test_df.columns=['emotion'] test_df.to_csv('lr_tfidf.csv' )<load_pretrained>
tmp = previous[(previous['NAME_CONTRACT_TYPE'].isin(['Cash loans'])) ].groupby(['SK_ID_CURR'])['RATE_DOWN_PAYMENT']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_amt_goods'] = tmp_merge['des1'] df['max_amt_goods'] = tmp_merge['des2'] df['mean_amt_goods'] = tmp_merge['des3'] tmp = previous[(previous['NAME_CONTRACT_TYPE'].isin(['Consumer loans'])) ].groupby(['SK_ID_CURR'])['RATE_DOWN_PAYMENT']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_amt_goods_v1'] = tmp_merge['des1'] df['max_amt_goods_v1'] = tmp_merge['des2'] df['mean_amt_goods_v1'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
train_df = pd.read_pickle("../input/dm-competition-tweets-emotion/train_df.pkl")
test_df = pd.read_pickle("../input/dm-competition-tweets-emotion/test_df.pkl")
<train_model>
tmp = previous[(previous['NAME_CONTRACT_TYPE'].isin(['Cash loans','Consumer loans'])) ].groupby(['SK_ID_CURR'])['AMT_ANNUITY']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_amt_annuity'] = tmp_merge['des1'] df['max_amt_annuity'] = tmp_merge['des2'] df['mean_amt_annuity'] = tmp_merge['des3'] tmp = previous[(previous['NAME_CONTRACT_TYPE'].isin(['Revolving loans'])) ].groupby(['SK_ID_CURR'])['AMT_ANNUITY']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_amt_card_annuity'] = tmp_merge['des1'] df['max_amt_card_annuity'] = tmp_merge['des2'] df['mean_amt_card_annuity'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
max_words = 20000 max_len = 300 tok = Tokenizer(num_words=max_words) tok.fit_on_texts(train_df['text'] )<string_transform>
tmp = previous[(previous['NAME_CONTRACT_TYPE'].isin(['Cash loans','Consumer loans'])) ].groupby(['SK_ID_CURR'])['CNT_PAYMENT']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_cntpay'] = tmp_merge['des1'] df['max_cntpay'] = tmp_merge['des2'] df['mean_cntpay'] = tmp_merge['des3'] tmp = previous[(previous['NAME_CONTRACT_TYPE'].isin(['Consumer loans'])) ].groupby(['SK_ID_CURR'])['CNT_PAYMENT']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_cntpay_v1'] = tmp_merge['des1'] df['max_cntpay_v1'] = tmp_merge['des2'] df['mean_cntpay_v1'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
train_seq = tok.texts_to_sequences(train_df['text']) test_seq = tok.texts_to_sequences(test_df['text']) train_seq_mat = sequence.pad_sequences(train_seq,maxlen=max_len) test_seq_mat = sequence.pad_sequences(test_seq,maxlen=max_len) print(train_seq_mat.shape) print(test_seq_mat.shape )<categorify>
tmp = previous[(previous['NAME_CONTRACT_STATUS'] == 'Approved')&(previous['AMT_APPLICATION'] > 0)&(previous['NAME_CONTRACT_TYPE'].isin(['Cash loans','Consumer loans'])) ].sort_values(by=['SK_ID_CURR','DAYS_DECISION']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-1 ).reset_index() tmp = tmp[['SK_ID_CURR','AMT_APPLICATION']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_recent_app'] = tmp_merge['des'] tmp = previous[(previous['NAME_CONTRACT_STATUS'] == 'Approved')&(previous['AMT_APPLICATION'] > 0)&(previous['NAME_CONTRACT_TYPE'].isin(['Cash loans','Consumer loans'])) ].sort_values(by=['SK_ID_CURR','DAYS_DECISION']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-1 ).reset_index() tmp = tmp[['SK_ID_CURR','AMT_CREDIT']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_recent_credit'] = tmp_merge['des'] tmp = previous[(previous['NAME_CONTRACT_STATUS'] == 'Approved')&(previous['AMT_CREDIT'] > 0)&(previous['NAME_CONTRACT_TYPE'].isin(['Revolving loans'])) ].sort_values(by=['SK_ID_CURR','DAYS_DECISION']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-1 ).reset_index() tmp = tmp[['SK_ID_CURR','AMT_CREDIT']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_recent_card'] = tmp_merge['des']
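Several of these features rely on the same pattern: sort by DAYS_DECISION, then groupby('SK_ID_CURR').nth(-1) to keep each client's most recent previous application. A toy illustration of that pandas idiom, with hypothetical data:

import pandas as pd

demo = pd.DataFrame({'SK_ID_CURR': [1, 1, 2],
                     'DAYS_DECISION': [-900, -30, -400],
                     'AMT_APPLICATION': [1000, 2500, 800]})
latest = (demo.sort_values(['SK_ID_CURR', 'DAYS_DECISION'])
              .groupby('SK_ID_CURR').nth(-1))
# Keeps the row with the largest (most recent) DAYS_DECISION per client:
# client 1 -> AMT_APPLICATION 2500, client 2 -> 800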
Home Credit Default Risk
1,511,034
label_encoder = LabelEncoder()
label_encoder.fit(y_train)
print('check label: ', label_encoder.classes_)
print('y_train[0:4]: ', y_train[0:4])
print('y_train.shape: ', y_train.shape)
print('y_test.shape: ', y_test.shape)

def label_encode(le, labels):
    enc = le.transform(labels)
    return keras.utils.to_categorical(enc)

def label_decode(le, one_hot_label):
    dec = np.argmax(one_hot_label, axis=1)
    return le.inverse_transform(dec)

y_train = label_encode(label_encoder, y_train)
y_test = label_encode(label_encoder, y_test)
print('y_train[0:4]: ', y_train[0:4])
print('y_train.shape: ', y_train.shape)
print('y_test.shape: ', y_test.shape)
<categorify>
tmp = previous[(previous['NAME_CONTRACT_STATUS'] != 'Approved')&(previous['AMT_APPLICATION'] > 0)&(previous['NAME_CONTRACT_TYPE'].isin(['Cash loans','Consumer loans'])) ].sort_values(by=['SK_ID_CURR','DAYS_DECISION']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-1 ).reset_index() tmp = tmp[['SK_ID_CURR','AMT_APPLICATION']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_recent_app_fail'] = tmp_merge['des'] tmp = previous[(previous['NAME_CONTRACT_STATUS'] != 'Approved')&(previous['AMT_APPLICATION'] > 0)&(previous['NAME_CONTRACT_TYPE'].isin(['Cash loans','Consumer loans'])) ].sort_values(by=['SK_ID_CURR','DAYS_DECISION']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-1 ).reset_index() tmp = tmp[['SK_ID_CURR','AMT_CREDIT']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_recent_credit_fail'] = tmp_merge['des'] tmp = previous[(previous['NAME_CONTRACT_STATUS'] != 'Approved')&(previous['AMT_CREDIT'] > 0)&(previous['NAME_CONTRACT_TYPE'].isin(['Revolving loans'])) ].sort_values(by=['SK_ID_CURR','DAYS_DECISION']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-1 ).reset_index() tmp = tmp[['SK_ID_CURR','AMT_CREDIT']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_recent_card_fail'] = tmp_merge['des']
Home Credit Default Risk
1,511,034
input_shape = X_train.shape[1] print('input_shape: ', input_shape) output_shape = len(label_encoder.classes_) print('output_shape: ', output_shape )<choose_model_class>
tmp = previous[(previous['NAME_CONTRACT_STATUS'] == 'Approved')&(previous['RATE_DOWN_PAYMENT'] > 0)].sort_values(by=['SK_ID_CURR','DAYS_DECISION']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-1 ).reset_index() tmp = tmp[['SK_ID_CURR','RATE_DOWN_PAYMENT']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_recent_ratedown'] = tmp_merge['des'] tmp = previous[(previous['NAME_CONTRACT_STATUS'] != 'Approved')&(previous['RATE_DOWN_PAYMENT'] > 0)].sort_values(by=['SK_ID_CURR','DAYS_DECISION']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-1 ).reset_index() tmp = tmp[['SK_ID_CURR','RATE_DOWN_PAYMENT']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_recent_ratedown_fail'] = tmp_merge['des']
Home Credit Default Risk
1,511,034
inputs = Input(name='inputs',shape=[max_len]) layer = Embedding(max_words+1,128,input_length=max_len )(inputs) layer = LSTM(128 )(layer) layer = Dense(128,activation="relu",name="FC1" )(layer) layer = Dropout(0.5 )(layer) layer = Dense(output_shape,activation="softmax",name="FC2" )(layer) model = Model(inputs=inputs,outputs=layer) model.summary() model.compile(loss="categorical_crossentropy",optimizer=RMSprop() ,metrics=["accuracy"] )<train_model>
tmp = previous[(previous['NAME_CONTRACT_TYPE'].isin(['Consumer loans','Cash loans'])) &(previous['CNT_PAYMENT'] > 0)].sort_values(by=['SK_ID_CURR','DAYS_DECISION']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-1 ).reset_index() tmp = tmp[['SK_ID_CURR','CNT_PAYMENT']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_recent_cntpay'] = tmp_merge['des']
Home Credit Default Risk
1,511,034
model_fit = model.fit(train_seq_mat,y_train,batch_size=128,epochs=3, callbacks=[EarlyStopping(monitor='val_loss',min_delta=0.0001)]) <predict_on_test>
tmp = previous[previous['AMT_CREDIT'] > 0] for i in ['Cash loans','Consumer loans','Revolving loans']: for df in [train,test]: tmp1 = tmp[tmp['NAME_CONTRACT_TYPE'] == i].groupby(['SK_ID_CURR'])['AMT_CREDIT'].count().reset_index() tmp1.columns = ['SK_ID_CURR','des'] tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp1, on=['SK_ID_CURR'], how='left') df['count_' + "_".join(i.lower().split())] = tmp_merge['des']
Home Credit Default Risk
1,511,034
pred_result_lstm = label_decode(label_encoder, model.predict(test_seq_mat, batch_size=128)) pred_result_lstm[:5]<save_to_csv>
tmp = previous[previous['AMT_CREDIT'].isnull() ] for i in ['Cash loans','Consumer loans','Revolving loans']: for df in [train,test]: tmp1 = tmp[tmp['NAME_CONTRACT_TYPE'] == i].groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index() tmp1.columns = ['SK_ID_CURR','des'] tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp1, on=['SK_ID_CURR'], how='left') df['count_null_' + "_".join(i.lower().split())] = tmp_merge['des']
Home Credit Default Risk
1,511,034
test_df['emotion']=pred_result_lstm test_df.drop(columns=['hashtags','text'],inplace=True) test_df.index.rename('id',inplace=True) test_df.columns=['emotion'] test_df.to_csv('keras_tfidf.csv' )<load_from_csv>
tmp = previous[previous['AMT_CREDIT'] > 0].groupby(['SK_ID_CURR'])['DAYS_DECISION']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_day_decision'] = tmp_merge['des1'] df['max_day_decision'] = tmp_merge['des2'] df['mean_day_decision'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
model_compare = pd.read_csv("../input/dm-competition-tweets-emotion/final.csv")
model_compare
<import_modules>
tmp = previous[previous['NAME_CONTRACT_STATUS'] != 'Approved'].groupby(['SK_ID_CURR'])['DAYS_DECISION']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_day_decision_fail'] = tmp_merge['des1'] df['max_day_decision_fail'] = tmp_merge['des2'] df['mean_day_decision_fail'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
import pandas as pd
import numpy as np
import sklearn
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
from sklearn import preprocessing
<load_from_csv>
tmp = previous[(~previous['NAME_CASH_LOAN_PURPOSE'].isin(['XAP','XNA'])) ] for df in [train,test]: tmp1 = tmp.groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index() tmp1.columns = ['SK_ID_CURR','des'] tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp1, on=['SK_ID_CURR'], how='left') df['count_clear_reason'] = tmp_merge['des']
Home Credit Default Risk
1,511,034
adult = pd.read_csv("../input/adult-data/train_data.csv", sep=r'\s*,\s*',
                    engine='python', na_values="?")
adult.head()
<correct_missing_values>
tmp = previous.groupby(['SK_ID_CURR'])['DAYS_TERMINATION']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_day_termination'] = tmp_merge['des1'] df['max_day_termination'] = tmp_merge['des2'] df['mean_day_termination'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
adult = adult.dropna()<count_values>
tmp = previous.groupby(['SK_ID_CURR'])['DAYS_LAST_DUE_1ST_VERSION']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_day_lastdue'] = tmp_merge['des1'] df['max_day_lastdue'] = tmp_merge['des2'] df['mean_day_lastdue'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
adult["native.country"].value_counts()<load_from_csv>
tmp = previous[~previous['DAYS_LAST_DUE_1ST_VERSION'].isnull() ].sort_values(by=['SK_ID_CURR','DAYS_LAST_DUE_1ST_VERSION']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-2 ).reset_index() tmp = tmp[['SK_ID_CURR','DAYS_LAST_DUE_1ST_VERSION']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['2nd_day_lastdue'] = tmp_merge['des']
Home Credit Default Risk
1,511,034
testadult = pd.read_csv("../input/adult-data/test_data.csv", sep=r'\s*,\s*',
                        engine='python', na_values="?")
testadult.head()
<feature_engineering>
tmp = previous.groupby(['SK_ID_CURR'])['sooner']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_sooner'] = tmp_merge['des1'] df['max_sooner'] = tmp_merge['des2'] df['mean_sooner'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
adult.loc[adult['sex']!="Male", 'sex'] = '0' adult.loc[adult['sex']=="Male", 'sex'] = '1' testadult.loc[testadult['sex']!="Male", 'sex'] = '0' testadult.loc[testadult['sex']=="Male", 'sex'] = '1'<count_values>
tmp = previous.groupby(['SK_ID_CURR'])['SELLERPLACE_AREA']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_seller'] = tmp_merge['des1'] df['max_seller'] = tmp_merge['des2'] df['mean_seller'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
adult['sex'].value_counts()<count_values>
for i in ['middle','low_normal','high','low_action']: tmp = previous[(previous['NAME_CONTRACT_TYPE'] == 'Cash loans')&(previous['NAME_YIELD_GROUP'] == i)] for df in [train,test]: tmp1 = tmp.groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index() tmp1.columns = ['SK_ID_CURR','des'] tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp1, on=['SK_ID_CURR'], how='left') df['count_' + str(i)] = tmp_merge['des'].fillna(0 )
Home Credit Default Risk
1,511,034
testadult['sex'].value_counts()<feature_engineering>
for df in [train,test]: df['tmp'] = df[['count_middle','count_low_normal','count_high','count_low_action']].sum(axis=1) for i in ['middle','low_normal','high','low_action']: df['ratio_' + i] = df['count_' + i]/df['tmp']
Home Credit Default Risk
1,511,034
adult.loc[adult['race']!="White", 'race'] = '0' adult.loc[adult['race']=="White", 'race'] = '1' testadult.loc[testadult['race']!="White", 'race'] = '0' testadult.loc[testadult['race']=="White", 'race'] = '1'<count_values>
for i in ['middle','low_normal','high','low_action']: tmp = previous[(previous['NAME_CONTRACT_TYPE'] == 'Consumer loans')&(previous['NAME_YIELD_GROUP'] == i)] for df in [train,test]: tmp1 = tmp.groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index() tmp1.columns = ['SK_ID_CURR','des'] tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp1, on=['SK_ID_CURR'], how='left') df['count_' + str(i)+ '_v1'] = tmp_merge['des'].fillna(0 )
Home Credit Default Risk
1,511,034
adult['race'].value_counts()<count_values>
for df in [train,test]: df['tmp'] = df[['count_middle_v1','count_low_normal_v1','count_high_v1','count_low_action_v1']].sum(axis=1) for i in ['middle','low_normal','high','low_action']: df['ratio_' + i +"_v1"] = df['count_' + i + "_v1"]/df['tmp']
Home Credit Default Risk
1,511,034
testadult['race'].value_counts()<feature_engineering>
previous['tmp'] =(previous['AMT_ANNUITY'] * previous['CNT_PAYMENT'])/previous['AMT_CREDIT'] tmp = previous[(previous['NAME_CONTRACT_TYPE'] == 'Cash loans')&(previous['NAME_CONTRACT_STATUS'] != 'Approved')].groupby(['SK_ID_CURR'])['tmp']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_interest'] = tmp_merge['des1'] df['max_interest'] = tmp_merge['des2'] df['mean_interest'] = tmp_merge['des3'] tmp = previous[(previous['NAME_CONTRACT_TYPE'] == 'Consumer loans')&(previous['NAME_CONTRACT_STATUS'] != 'Approved')].groupby(['SK_ID_CURR'])['tmp']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_interest_v1'] = tmp_merge['des1'] df['max_interest_v1'] = tmp_merge['des2'] df['mean_interest_v1'] = tmp_merge['des3']
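The tmp column built here reads as an implied-interest proxy (my reading of the code, not stated in the dump), since annuity times number of scheduled payments is the total repayment:

$$\text{ratio} = \frac{\text{AMT\_ANNUITY} \times \text{CNT\_PAYMENT}}{\text{AMT\_CREDIT}}$$

A ratio above 1 means total repayments exceed the credit granted, the excess being interest and fees.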
Home Credit Default Risk
1,511,034
adult.loc[adult['native.country']!="United-States", 'native.country'] = '0' adult.loc[adult['native.country']=="United-States", 'native.country'] = '1' testadult.loc[testadult['native.country']!="United-States", 'native.country'] = '0' testadult.loc[testadult['native.country']=="United-States", 'native.country'] = '1'<count_values>
tmp = previous.groupby(['SK_ID_CURR'])['DAYS_FIRST_DRAWING']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_firstdraw'] = tmp_merge['des1'] df['max_firstdraw'] = tmp_merge['des2'] df['mean_firstdraw'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
adult['native.country'].value_counts()<count_values>
previous['tmp'] = previous['DAYS_FIRST_DRAWING'] - previous['DAYS_DECISION'] tmp = previous.groupby(['SK_ID_CURR'])['tmp']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_firstdraw_decision'] = tmp_merge['des1'] df['max_firstdraw_decision'] = tmp_merge['des2'] df['mean_firstdraw_decision'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
testadult['native.country'].value_counts()<train_model>
previous['tmp'] = previous['DAYS_FIRST_DUE'] - previous['DAYS_FIRST_DRAWING'] tmp = previous.groupby(['SK_ID_CURR'])['tmp']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_firstdraw_firstdue'] = tmp_merge['des1'] df['max_firstdraw_firstdue'] = tmp_merge['des2'] df['mean_firstdraw_firstdue'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
Xadult = adult[['education.num','age','race','sex','capital.gain','capital.loss','hours.per.week']] Yadult = adult.income Xtestadult = testadult[['education.num','age','race','sex','capital.gain','capital.loss','hours.per.week']] knn = KNeighborsClassifier(n_neighbors=25) knn.fit(Xadult,Yadult )<compute_train_metric>
previous['tmp'] = previous['DAYS_LAST_DUE'] - previous['DAYS_FIRST_DRAWING'] tmp = previous.groupby(['SK_ID_CURR'])['tmp']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_firstdraw_lastdue'] = tmp_merge['des1'] df['max_firstdraw_lastdue'] = tmp_merge['des2'] df['mean_firstdraw_lastdue'] = tmp_merge['des3']
Home Credit Default Risk
1,511,034
cval = 10 scores = cross_val_score(knn, Xadult, Yadult, cv=cval) scores<define_variables>
tmp = bureau[(bureau['CREDIT_ACTIVE'] == "Active")].groupby(['SK_ID_CURR'])['SK_ID_BUREAU'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_active_bureau'] = tmp_merge['des'].fillna(0) tmp = bureau[bureau['CREDIT_ACTIVE'] != "Active"].groupby(['SK_ID_CURR'])['SK_ID_BUREAU'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_closed_bureau'] = tmp_merge['des'].fillna(0 )
Home Credit Default Risk
1,511,034
total = 0 for i in scores: total += i acuracia_esperada = total/cval acuracia_esperada<predict_on_test>
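The averaging loop above is just the mean of the fold scores; an equivalent one-liner:

acuracia_esperada = scores.mean()  # cross_val_score returns a NumPy array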
tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['SK_ID_BUREAU'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_active_bureau_v2'] = tmp_merge['des'].fillna(0) tmp = bureau[(bureau['CREDIT_ACTIVE'] != "Active")&(bureau['CREDIT_TYPE'] == "Credit card")].groupby(['SK_ID_CURR'])['SK_ID_BUREAU'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_closed_bureau_v2'] = tmp_merge['des'].fillna(0 )
Home Credit Default Risk
1,511,034
YtestPred = knn.predict(Xtestadult) YtestPred<define_variables>
tmp = bureau[(~bureau['CREDIT_TYPE'].isin(["Credit card","Consumer credit"])) ].groupby(['SK_ID_CURR'])['SK_ID_BUREAU'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_active_bureau_v3'] = tmp_merge['des'].fillna(0) tmp = bureau[(bureau['CREDIT_ACTIVE'] != "Active")&(~bureau['CREDIT_TYPE'].isin(["Credit card","Consumer credit"])) ].groupby(['SK_ID_CURR'])['SK_ID_BUREAU'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_closed_bureau_v3'] = tmp_merge['des'].fillna(0 )
Home Credit Default Risk
1,511,034
maior_50 = 0 menor_50 = 0 for i in YtestPred: if i == '<=50K': menor_50 += 1 else: maior_50 += 1 dicio = {'<=50K':menor_50, '>50K':maior_50}<save_to_csv>
bureau['AMT_CREDIT_SUM'].replace(0, np.nan, inplace = True) tmp = bureau[(bureau['CREDIT_ACTIVE'] == "Active")&(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_active_credit_bureau'] = tmp_merge['des1'] df['max_active_credit_bureau'] = tmp_merge['des2'] df['mean_active_credit_bureau'] = tmp_merge['des3'] df['sum_active_credit_bureau'] = tmp_merge['des4'] tmp = bureau[(bureau['CREDIT_ACTIVE'] != "Active")&(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_closed_credit_bureau'] = tmp_merge['des1'] df['max_closed_credit_bureau'] = tmp_merge['des2'] df['mean_closed_credit_bureau'] = tmp_merge['des3'] df['sum_closed_credit_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
result = np.vstack(( testadult["Id"], YtestPred)).T x = ["Id","income"] Resultado = pd.DataFrame(columns = x, data = result) Resultado.to_csv("Resultado1.csv", index = False )<train_model>
bureau['AMT_CREDIT_SUM'].replace(0, np.nan, inplace = True) tmp = bureau[(bureau['CREDIT_TYPE'] == "Credit card")].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_active_credit_bureau_v1'] = tmp_merge['des1'] df['max_active_credit_bureau_v1'] = tmp_merge['des2'] df['mean_active_credit_bureau_v1'] = tmp_merge['des3'] df['sum_active_credit_bureau_v1'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
Xadult = adult[['education.num','age','sex','capital.gain','capital.loss','hours.per.week','native.country']] Yadult = adult.income Xtestadult = testadult[['education.num','age','sex','capital.gain','capital.loss','hours.per.week','native.country']] knn = KNeighborsClassifier(n_neighbors=25) knn.fit(Xadult,Yadult )<compute_train_metric>
bureau['AMT_CREDIT_SUM'].replace(0, np.nan, inplace = True) tmp = bureau[(bureau['CREDIT_TYPE'] == "Car loan")].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_active_credit_bureau_v2'] = tmp_merge['des1'] df['max_active_credit_bureau_v2'] = tmp_merge['des2'] df['mean_active_credit_bureau_v2'] = tmp_merge['des3'] df['sum_active_credit_bureau_v2'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
cval = 10 scores = cross_val_score(knn, Xadult, Yadult, cv=cval) scores<define_variables>
bureau['AMT_CREDIT_SUM'].replace(0, np.nan, inplace = True) tmp = bureau[(~bureau['CREDIT_TYPE'].isin(["Credit card","Consumer credit","Car loan"])) ].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_active_credit_bureau_v3'] = tmp_merge['des1'] df['max_active_credit_bureau_v3'] = tmp_merge['des2'] df['mean_active_credit_bureau_v3'] = tmp_merge['des3'] df['sum_active_credit_bureau_v3'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
total = 0 for i in scores: total += i acuracia_esperada = total/cval acuracia_esperada<predict_on_test>
tmp = bureau.groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_credit_bureau'] = tmp_merge['des1'] df['max_credit_bureau'] = tmp_merge['des2'] df['mean_credit_bureau'] = tmp_merge['des3'] df['sum_credit_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
YtestPred = knn.predict(Xtestadult) YtestPred<define_variables>
tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['DAYS_CREDIT_ENDDATE'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_endate_bureau'] = tmp_merge['des1'] df['max_endate_bureau'] = tmp_merge['des2'] df['mean_endate_bureau'] = tmp_merge['des3'] df['sum_endate_bureau'] = tmp_merge['des4'] tmp = bureau[(~bureau['DAYS_CREDIT_ENDDATE'].isnull())&(( bureau['CREDIT_TYPE'] == "Consumer credit")) ].sort_values(by=['SK_ID_CURR','DAYS_CREDIT_ENDDATE']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-2 ).reset_index() tmp = tmp[['SK_ID_CURR','DAYS_CREDIT_ENDDATE']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_endate_bureau'] = tmp_merge['des']
Home Credit Default Risk
1,511,034
maior_50 = 0 menor_50 = 0 for i in YtestPred: if i == '<=50K': menor_50 += 1 else: maior_50 += 1 dicio = {'<=50K':menor_50, '>50K':maior_50}<save_to_csv>
tmp = bureau[(bureau['CREDIT_TYPE'] == "Car loan")].groupby(['SK_ID_CURR'])['DAYS_CREDIT_ENDDATE'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_endate_bureau_v1'] = tmp_merge['des1'] df['max_endate_bureau_v1'] = tmp_merge['des2'] df['mean_endate_bureau_v1'] = tmp_merge['des3'] df['sum_endate_bureau_v1'] = tmp_merge['des4'] tmp = bureau[(~bureau['DAYS_CREDIT_ENDDATE'].isnull())&(( bureau['CREDIT_TYPE'] == "Car loan")) ].sort_values(by=['SK_ID_CURR','DAYS_CREDIT_ENDDATE']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-2 ).reset_index() tmp = tmp[['SK_ID_CURR','DAYS_CREDIT_ENDDATE']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_endate_bureau_v1'] = tmp_merge['des']
Home Credit Default Risk
1,511,034
result = np.vstack(( testadult["Id"], YtestPred)).T x = ["Id","income"] Resultado = pd.DataFrame(columns = x, data = result) Resultado.to_csv("Resultado2.csv", index = False )<train_model>
tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['DAYS_CREDIT'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_startdate_bureau'] = tmp_merge['des1'] df['max_startdate_bureau'] = tmp_merge['des2'] df['mean_startdate_bureau'] = tmp_merge['des3'] df['sum_startdate_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
Xadult = adult[['education.num','race','sex','capital.gain','capital.loss','hours.per.week','native.country']] Yadult = adult.income Xtestadult = testadult[['education.num','race','sex','capital.gain','capital.loss','hours.per.week','native.country']] knn = KNeighborsClassifier(n_neighbors=25) knn.fit(Xadult,Yadult )<compute_train_metric>
tmp = bureau[(~bureau['DAYS_CREDIT_ENDDATE'].isnull())&(( bureau['CREDIT_TYPE'] == "Consumer credit")) ].sort_values(by=['SK_ID_CURR','DAYS_CREDIT_ENDDATE']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-2 ).reset_index() tmp = tmp[['SK_ID_CURR','DAYS_ENDDATE_FACT']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_endatefact_bureau'] = tmp_merge['des']
Home Credit Default Risk
1,511,034
cval = 10 scores = cross_val_score(knn, Xadult, Yadult, cv=cval) scores<define_variables>
tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['DAYS_ENDDATE_FACT'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_endatefact_bureau'] = tmp_merge['des1'] df['max_endatefact_bureau'] = tmp_merge['des2'] df['mean_endatefact_bureau'] = tmp_merge['des3'] df['sum_endatefact_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
total = 0 for i in scores: total += i acuracia_esperada = total/cval acuracia_esperada<predict_on_test>
bureau['tmp'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT'] tmp = bureau.groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_deltaendate_bureau'] = tmp_merge['des1'] df['max_deltaendate_bureau'] = tmp_merge['des2'] df['mean_deltaendate_bureau'] = tmp_merge['des3'] df['sum_deltaendate_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
YtestPred = knn.predict(Xtestadult) YtestPred<define_variables>
bureau['tmp'] =(bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_CREDIT']) tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_duration_bureau'] = tmp_merge['des1'] df['max_duration_bureau'] = tmp_merge['des2'] df['mean_duration_bureau'] = tmp_merge['des3'] df['sum_duration_bureau'] = tmp_merge['des4'] bureau['tmp'] =(bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_CREDIT']) tmp = bureau[(bureau['CREDIT_TYPE'] != "Consumer credit")].groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_duration_bureau_v1'] = tmp_merge['des1'] df['max_duration_bureau_v1'] = tmp_merge['des2'] df['mean_duration_bureau_v1'] = tmp_merge['des3'] df['sum_duration_bureau_v1'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
maior_50 = 0 menor_50 = 0 for i in YtestPred: if i == '<=50K': menor_50 += 1 else: maior_50 += 1 dicio = {'<=50K':menor_50, '>50K':maior_50}<save_to_csv>
bureau['tmp'] =(bureau['DAYS_ENDDATE_FACT'] - bureau['DAYS_CREDIT']) tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_durationfact_bureau'] = tmp_merge['des1'] df['max_durationfact_bureau'] = tmp_merge['des2'] df['mean_durationfact_bureau'] = tmp_merge['des3'] df['sum_durationfact_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
result = np.vstack(( testadult["Id"], YtestPred)).T x = ["Id","income"] Resultado = pd.DataFrame(columns = x, data = result) Resultado.to_csv("Resultado3.csv", index = False )<load_from_csv>
bureau['tmp'] =(bureau['DAYS_ENDDATE_FACT'] - bureau['DAYS_CREDIT_ENDDATE'])/(bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']) tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_sooner_bureau'] = tmp_merge['des1'] df['max_sooner_bureau'] = tmp_merge['des2'] df['mean_sooner_bureau'] = tmp_merge['des3'] df['sum_sooner_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
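The "sooner" ratio above divides by DAYS_CREDIT - DAYS_CREDIT_ENDDATE, which can be zero, so the ratio can come out as +/-inf and silently distort the min/max aggregates. A defensive variant (my addition, not in the source; assumes the `bureau` frame from the cells above):

import numpy as np

denom = bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']
bureau['tmp'] = (bureau['DAYS_ENDDATE_FACT'] - bureau['DAYS_CREDIT_ENDDATE']) / denom
# Zero denominators produce +/-inf; treat those as missing instead.
bureau['tmp'] = bureau['tmp'].replace([np.inf, -np.inf], np.nan)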
raw = pd.read_csv(".. /input/badult/train_data.csv", names= None, engine='python', na_values = '?' )<set_options>
bureau['tmp'] = (bureau['DAYS_ENDDATE_FACT'] - bureau['DAYS_CREDIT_ENDDATE']) / (bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE'])
tmp = bureau[~bureau['CREDIT_TYPE'].isin(['Credit card', 'Consumer credit'])].groupby(['SK_ID_CURR'])['tmp'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_sooner_bureau_v1'] = tmp_merge['des1']
    df['max_sooner_bureau_v1'] = tmp_merge['des2']
    df['mean_sooner_bureau_v1'] = tmp_merge['des3']
    df['sum_sooner_bureau_v1'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
clean = raw.dropna()
clean.info()<count_unique_values>
bureau['tmp'] = bureau['AMT_CREDIT_SUM'] / (bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE'])
tmp = bureau[bureau['CREDIT_TYPE'] == "Credit card"].groupby(['SK_ID_CURR'])['tmp'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_annuity_bureau'] = tmp_merge['des1']
    df['max_annuity_bureau'] = tmp_merge['des2']
    df['mean_annuity_bureau'] = tmp_merge['des3']
    df['sum_annuity_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
obg = raw[['workclass', 'education', 'marital.status', 'occupation', 'relationship', 'race', 'sex', 'native.country', 'income']]
obg.nunique()<categorify>
bureau['tmp'] = bureau['AMT_CREDIT_SUM'] / (bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE'])
tmp = bureau[(bureau['CREDIT_ACTIVE'] == "Active") & (bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['tmp'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_annuity_bureau_v1'] = tmp_merge['des1']
    df['max_annuity_bureau_v1'] = tmp_merge['des2']
    df['mean_annuity_bureau_v1'] = tmp_merge['des3']
    df['sum_annuity_bureau_v1'] = tmp_merge['des4']

# Note: in the source this second block repeats exactly the same filter as the
# _v1 block above, so the _v2 features duplicate _v1 (and the _v2 names are
# later overwritten by the AMT_ANNUITY aggregates further down).
tmp = bureau[(bureau['CREDIT_ACTIVE'] == "Active") & (bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['tmp'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_annuity_bureau_v2'] = tmp_merge['des1']
    df['max_annuity_bureau_v2'] = tmp_merge['des2']
    df['mean_annuity_bureau_v2'] = tmp_merge['des3']
    df['sum_annuity_bureau_v2'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
analysis = clean
analysis = analysis.apply(preprocessing.LabelEncoder().fit_transform)
plt.matshow(analysis.corr())<sort_values>
bureau['tmp'] = bureau['AMT_CREDIT_SUM'] / (bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE'])
tmp = bureau[~bureau['CREDIT_TYPE'].isin(["Credit card", "Consumer credit"])].groupby(['SK_ID_CURR'])['tmp'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_annuity_bureau_v3'] = tmp_merge['des1']
    df['max_annuity_bureau_v3'] = tmp_merge['des2']
    df['mean_annuity_bureau_v3'] = tmp_merge['des3']
    df['sum_annuity_bureau_v3'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
anl0 = analysis.corr().income.sort_values(ascending=True)
anl0<categorify>
bureau['AMT_CREDIT_SUM_DEBT_v1'] = bureau['AMT_CREDIT_SUM_DEBT'].replace(0, np.nan)
tmp = bureau[(bureau['CREDIT_ACTIVE'] == "Active") & (bureau['CREDIT_TYPE'] == "Credit card")].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM_DEBT_v1'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_debt_bureau'] = tmp_merge['des1']
    df['max_debt_bureau'] = tmp_merge['des2']
    df['mean_debt_bureau'] = tmp_merge['des3']
    df['sum_debt_bureau'] = tmp_merge['des4']

tmp = bureau[(bureau['CREDIT_ACTIVE'] == "Active") & (bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM_DEBT_v1'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_debt_bureau_v1'] = tmp_merge['des1']
    df['max_debt_bureau_v1'] = tmp_merge['des2']
    df['mean_debt_bureau_v1'] = tmp_merge['des3']
    df['sum_debt_bureau_v1'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
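Replacing 0 with NaN before aggregating, as the debt features above do, changes the semantics of mean/min: accounts with no recorded debt are excluded rather than pulling the statistics toward zero. A minimal illustration on toy data (my own, not from the kernel):

import numpy as np
import pandas as pd

s = pd.Series([0.0, 1000.0, 3000.0])
print(s.mean())                     # 1333.33... (zeros counted as real values)
print(s.replace(0, np.nan).mean())  # 2000.0 (zeros treated as missing; NaNs are skipped)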
anl1 = pd.get_dummies(clean[['relationship', 'marital.status', 'capital.loss', 'sex', 'hours.per.week', 'age', 'education.num', 'capital.gain', 'income']])
anl1 = anl1.corr().loc[:, 'income_>50K'].sort_values(ascending=True)
anl1<categorify>
bureau['AMT_CREDIT_SUM_LIMIT_v1'] = bureau['AMT_CREDIT_SUM_LIMIT'].replace(0, np.nan)
tmp = bureau[bureau['CREDIT_TYPE'] == "Credit card"].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM_LIMIT_v1'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_limit_bureau'] = tmp_merge['des1']
    df['max_limit_bureau'] = tmp_merge['des2']
    df['mean_limit_bureau'] = tmp_merge['des3']
    df['sum_limit_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
anl1_5 = pd.get_dummies(clean)
anl1_5 = anl1_5.corr().loc[:, 'income_>50K'].sort_values(ascending=True).where(lambda x: abs(x) > 0.15).dropna()
anl1_5<categorify>
bureau['AMT_CREDIT_MAX_OVERDUE_v1'] = bureau['AMT_CREDIT_MAX_OVERDUE'].replace(0, np.nan)
tmp = bureau[bureau['CREDIT_TYPE'] == "Consumer credit"].groupby(['SK_ID_CURR'])['AMT_CREDIT_MAX_OVERDUE_v1'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_overdue_bureau'] = tmp_merge['des1'].fillna(0)
    df['max_overdue_bureau'] = tmp_merge['des2'].fillna(0)
    df['mean_overdue_bureau'] = tmp_merge['des3'].fillna(0)
    df['sum_overdue_bureau'] = tmp_merge['des4'].fillna(0)
Home Credit Default Risk
1,511,034
anl2 = clean[['occupation', 'income', 'race']]
anl2 = pd.get_dummies(anl2).drop(columns='income_<=50K')
anl2 = anl2.corr().loc[:, 'income_>50K'].sort_values(ascending=True).where(lambda x: abs(x) > 0.088).dropna()
anl2<categorify>
bureau['tmp'] = bureau['AMT_CREDIT_SUM_DEBT'] / bureau['AMT_CREDIT_SUM']
tmp = bureau.groupby(['SK_ID_CURR'])['tmp'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_ratio_debt_credit_bureau'] = tmp_merge['des1']
    df['max_ratio_debt_credit_bureau'] = tmp_merge['des2']
    df['mean_ratio_debt_credit_bureau'] = tmp_merge['des3']
    df['sum_ratio_debt_credit_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
train_clean = pd.get_dummies(clean)
index = anl1.where(lambda x: abs(x) > 0.07).dropna().index[1:-1].append(anl2.index[:-1])<load_from_csv>
# AMT_ANNUITY aggregates over all bureau records; note that these reuse the
# *_annuity_bureau_v2 names created earlier, overwriting those features.
bureau['tmp'] = bureau['AMT_ANNUITY'].fillna(0)
tmp = bureau.groupby(['SK_ID_CURR'])['tmp'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_annuity_bureau_v2'] = tmp_merge['des1']
    df['max_annuity_bureau_v2'] = tmp_merge['des2']
    df['mean_annuity_bureau_v2'] = tmp_merge['des3']
    df['sum_annuity_bureau_v2'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
test_raw = pd.read_csv(".. /input/badult/test_data.csv", names= None, engine='python' )<prepare_x_and_y>
install = pd.read_csv(".. /input/installments_payments.csv" )
Home Credit Default Risk
1,511,034
X_train = train_clean[index].drop(columns='sex_Female')
Y_train = train_clean.loc[:, 'income_>50K']<categorify>
tmp = install.groupby(['SK_ID_PREV', 'SK_ID_CURR']).agg({'NUM_INSTALMENT_NUMBER': ["count", "max"]}).reset_index()
tmp.columns = ['SK_ID_PREV', 'SK_ID_CURR', 'count', 'max']
tmp['delta'] = tmp['count'] - tmp['max']
tmp = tmp.merge(previous[['SK_ID_PREV', 'CNT_PAYMENT', 'DAYS_LAST_DUE', 'NAME_CONTRACT_TYPE']], on=['SK_ID_PREV'], how='left')
tmp_1 = tmp.groupby(['SK_ID_CURR'])['delta'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp_1.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp_1, on=['SK_ID_CURR'], how='left')
    df['min_delta_num_install'] = tmp_merge['des1']
    df['max_delta_num_install'] = tmp_merge['des2']
    df['mean_delta_num_install'] = tmp_merge['des3']
    df['sum_delta_num_install'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
test_clean = pd.get_dummies(test_raw)
test_clean = test_clean.dropna()
X_test = test_clean[index].drop(columns='sex_Female')<import_modules>
tmp = install.groupby(['SK_ID_PREV', 'SK_ID_CURR']).agg({'NUM_INSTALMENT_VERSION': ["count", "max"]}).reset_index()
tmp.columns = ['SK_ID_PREV', 'SK_ID_CURR', 'count', 'max']
tmp = tmp.merge(previous[['SK_ID_PREV', 'CNT_PAYMENT', 'DAYS_LAST_DUE', 'NAME_CONTRACT_TYPE']], on=['SK_ID_PREV'], how='left')
tmp_1 = tmp.groupby(['SK_ID_CURR'])['max'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp_1.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp_1, on=['SK_ID_CURR'], how='left')
    df['min_max_version_install'] = tmp_merge['des1']
    df['max_max_version_install'] = tmp_merge['des2']
    df['mean_max_version_install'] = tmp_merge['des3']
    df['sum_max_version_install'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV<define_search_space>
tmp = install.groupby(['SK_ID_PREV', 'SK_ID_CURR']).agg({'NUM_INSTALMENT_NUMBER': ["count", "max"]}).reset_index()
tmp.columns = ['SK_ID_PREV', 'SK_ID_CURR', 'count', 'max']
tmp['delta'] = tmp['count'] / tmp['max']
tmp = tmp.merge(previous[['SK_ID_PREV', 'CNT_PAYMENT', 'DAYS_LAST_DUE', 'NAME_CONTRACT_TYPE']], on=['SK_ID_PREV'], how='left')
tmp_1 = tmp.groupby(['SK_ID_CURR'])['delta'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp_1.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp_1, on=['SK_ID_CURR'], how='left')
    df['min_ratio_num_install'] = tmp_merge['des1']
    df['max_ratio_num_install'] = tmp_merge['des2']
    df['mean_ratio_num_install'] = tmp_merge['des3']
    df['sum_ratio_num_install'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
k_range = list(range(1, 31))
weight_options = ['uniform', 'distance']
p_options = list(range(1, 3))
param_grid = dict(n_neighbors=k_range, p=p_options)<choose_model_class>
tmp = install[['SK_ID_PREV','SK_ID_CURR','NUM_INSTALMENT_NUMBER','AMT_INSTALMENT']].drop_duplicates()
Home Credit Default Risk
1,511,034
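Note that `weight_options` is defined in the prompt cell above but never placed in `param_grid`, so the grid search only tunes `n_neighbors` and `p`. If the intent was to tune the weighting scheme as well (an assumption on my part), the grid would be:

# weights is a real KNeighborsClassifier parameter; including it here is my
# assumption about the author's intent, not what the source actually searched.
param_grid = dict(n_neighbors=k_range, p=p_options, weights=weight_options)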
knn = KNeighborsClassifier(n_neighbors=5)
grid = GridSearchCV(knn, param_grid, cv=10, scoring='accuracy', n_jobs=-2)<train_model>
tmp = tmp.groupby(['SK_ID_PREV', 'SK_ID_CURR'])['AMT_INSTALMENT'].sum().reset_index()
tmp.columns = ['SK_ID_PREV', 'SK_ID_CURR', 'need_to_pay']
Home Credit Default Risk
1,511,034
grid.fit(X_train, Y_train)
print(grid.best_estimator_)
print(grid.best_score_)<train_model>
tmp_1 = install.groupby(['SK_ID_PREV'])['AMT_PAYMENT'].sum().reset_index()
tmp_1.columns = ['SK_ID_PREV', 'paid']
Home Credit Default Risk
1,511,034
# `f_kNN` is not defined anywhere in the source; presumably it is the tuned
# model found by the grid search above, so that definition is assumed here.
f_kNN = grid.best_estimator_
f_kNN.fit(X_train, Y_train)<predict_on_test>
tmp = tmp.merge(tmp_1, on=['SK_ID_PREV'], how='left')
Home Credit Default Risk
1,511,034
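The merge above assumes `SK_ID_PREV` is unique in `tmp_1`; it is here, since `tmp_1` comes straight from a groupby, but when chaining as many merges as this kernel does, pandas' `validate` argument is a cheap guard against accidental row duplication. My addition, not in the source:

# 'm:1' asserts the right-hand keys are unique; the merge raises if they are not.
tmp = tmp.merge(tmp_1, on=['SK_ID_PREV'], how='left', validate='m:1')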
Y_test = f_kNN.predict(X_test)<save_to_csv>
payment_history = tmp
payment_history['ratio'] = payment_history['paid'] / payment_history['need_to_pay']
payment_history['delta'] = payment_history['need_to_pay'] - payment_history['paid']
payment_history = payment_history.merge(previous[['SK_ID_PREV', 'AMT_ANNUITY', 'CNT_PAYMENT', 'NAME_CONTRACT_TYPE']], on=['SK_ID_PREV'], how='left')
payment_history['all_credit'] = payment_history['AMT_ANNUITY'] * payment_history['CNT_PAYMENT']
# The ratio/delta below overwrite the versions computed against need_to_pay,
# re-basing them on the full scheduled credit (annuity * number of payments).
payment_history['ratio'] = payment_history['paid'] / payment_history['all_credit']
payment_history['delta'] = payment_history['all_credit'] - payment_history['paid']
tmp = install.groupby(['SK_ID_PREV'])['NUM_INSTALMENT_VERSION'].mean().reset_index()
tmp.columns = ['SK_ID_PREV', 'mean_version']
payment_history = payment_history.merge(tmp, on=['SK_ID_PREV'], how='left')
Home Credit Default Risk
1,511,034
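As a sanity check on the `ratio`/`delta` definitions above: for a loan with AMT_ANNUITY = 1000 over CNT_PAYMENT = 12 instalments, all_credit = 12000; a client who has paid 9000 gets ratio 0.75 and delta 3000. In code (toy numbers, my own):

paid, annuity, cnt_payment = 9000.0, 1000.0, 12
all_credit = annuity * cnt_payment  # 12000.0, the full scheduled credit
ratio = paid / all_credit           # 0.75, share of the schedule already paid
delta = all_credit - paid           # 3000.0, amount still outstanding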
import csv  # needed for csv.writer below

Y_test_copy = Y_test
Y_test_copy = Y_test_copy.tolist()
answer = [["Id", "income"]]
for output in range(len(Y_test_copy)):
    if Y_test_copy[output] == 0:
        Y_test_copy[output] = '<=50K'
    else:
        Y_test_copy[output] = '>50K'
    answer.append([output, Y_test_copy[output]])
# newline='' is the documented way to open files for csv.writer
with open("submit.csv", 'w', newline='') as myFile:
    writer = csv.writer(myFile)
    writer.writerows(answer)<set_options>
tmp = payment_history[payment_history['mean_version'] > 0].groupby(['SK_ID_CURR'])['ratio'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_ratio_paid_install'] = tmp_merge['des1']
    df['max_ratio_paid_install'] = tmp_merge['des2']
    df['mean_ratio_paid_install'] = tmp_merge['des3']
    df['sum_ratio_paid_install'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
import warnings
from sklearn.exceptions import DataConversionWarning

pd.set_option('display.max_columns', 999)
warnings.filterwarnings(action='ignore', category=DataConversionWarning)<load_from_csv>
tmp = payment_history[payment_history['mean_version'] > 0].groupby(['SK_ID_CURR'])['delta'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_delta_paid_install'] = tmp_merge['des1']
    df['max_delta_paid_install'] = tmp_merge['des2']
    df['mean_delta_paid_install'] = tmp_merge['des3']
    df['sum_delta_paid_install'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
events = pd.read_csv('../input/events_up_to_01062018.csv', low_memory=False)
labels = pd.read_csv('../input/labels_training_set.csv')
test = pd.read_csv('../input/trocafone_kaggle_test.csv')<compute_train_metric>
tmp = install.groupby(['SK_ID_PREV', 'SK_ID_CURR']).agg({'NUM_INSTALMENT_NUMBER': ["count", "max"]}).reset_index()
tmp.columns = ['SK_ID_PREV', 'SK_ID_CURR', 'count', 'max']
tmp['delta'] = tmp['count'] / tmp['max']
tmp = tmp.merge(previous[['SK_ID_PREV', 'CNT_PAYMENT', 'DAYS_LAST_DUE', 'NAME_CONTRACT_TYPE']], on=['SK_ID_PREV'], how='left')
tmp_1 = tmp.groupby(['SK_ID_CURR'])['max'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp_1.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp_1, on=['SK_ID_CURR'], how='left')
    df['min_max_num_install'] = tmp_merge['des1']
    df['max_max_num_install'] = tmp_merge['des2']
    df['mean_max_num_install'] = tmp_merge['des3']
    df['sum_max_num_install'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
from sklearn.metrics import accuracy_score, roc_auc_score, log_loss

def evaluate_model(y_true, model=None, X_test=None, prediction=None, probabilites=None):
    if model is not None:
        if prediction is None:
            prediction = model.predict(X_test)
        if probabilites is None:
            probabilites = model.predict_proba(X_test)[:, 1]
    if prediction is not None:
        print('Accuracy: ', accuracy_score(y_true, prediction))
        print('ROC AUC Predict: ', roc_auc_score(y_true, prediction))
    if probabilites is not None:
        print('Avg Log loss: ', log_loss(y_true, probabilites))
        print('Sum Log loss: ', log_loss(y_true, probabilites, normalize=False))
        print('ROC AUC Proba: ', roc_auc_score(y_true, probabilites))<compute_train_metric>
tmp = install.groupby(['SK_ID_PREV'])['AMT_PAYMENT'].max().reset_index()
tmp.columns = ['SK_ID_PREV', 'max_install']
payment_history = payment_history.merge(tmp, on=['SK_ID_PREV'], how='left')
Home Credit Default Risk
1,511,034
all_false = np.zeros(len(labels))
evaluate_model(labels.label, prediction=all_false, probabilites=all_false)<define_variables>
payment_history['tmp'] = payment_history['max_install'] / payment_history['AMT_ANNUITY']
# The source cell is truncated after the groupby selection; it is completed
# here with the same aggregate-and-merge pattern used for the surrounding
# payment_history features. The *_maxinstall_annuity names are assumed, not
# from the source.
tmp = payment_history[payment_history['mean_version'] > 0].groupby(['SK_ID_CURR'])['tmp'].agg(['min', 'max', 'mean', 'sum']).reset_index()
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_maxinstall_annuity'] = tmp_merge['des1']
    df['max_maxinstall_annuity'] = tmp_merge['des2']
    df['mean_maxinstall_annuity'] = tmp_merge['des3']
    df['sum_maxinstall_annuity'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
full_train = get_train_set(events )<filter>
tmp = install.groupby(['SK_ID_PREV'])['NUM_INSTALMENT_NUMBER'].max().reset_index()
tmp.columns = ['SK_ID_PREV', 'max_num_install']
payment_history = payment_history.merge(tmp, on=['SK_ID_PREV'], how='left')
Home Credit Default Risk
1,511,034
train = full_train.loc[labels.person]
assert all(train.index == labels.person)<normalization>
tmp = install[install['AMT_INSTALMENT'] > install['AMT_PAYMENT']]
tmp = tmp.groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index()
tmp.columns = ['SK_ID_CURR', 'des']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['count_small_payment'] = tmp_merge['des'].fillna(0)

for i in [0, 5, 10, 15, 20, 25, 30, 40, 50, 60]:
    print(i)
    tmp = install[(install['DAYS_ENTRY_PAYMENT'] - install['DAYS_INSTALMENT']) > i]
    tmp = tmp.groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index()
    tmp.columns = ['SK_ID_CURR', 'des']
    for df in [train, test]:
        tmp_merge = df[['SK_ID_CURR']]
        tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
        df['count_late_payment_' + str(i)] = tmp_merge['des'].fillna(0)
Home Credit Default Risk
1,511,034
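The threshold loop above recomputes the payment delay on every pass. An equivalent single-pass variant (my own restructuring, same counts; assumes the `install`, `train`, and `test` frames from the cells above) precomputes the delay once and reuses it:

install['late_days'] = install['DAYS_ENTRY_PAYMENT'] - install['DAYS_INSTALMENT']
for i in [0, 5, 10, 15, 20, 25, 30, 40, 50, 60]:
    # count installments paid more than i days late, per client
    tmp = (install.loc[install['late_days'] > i]
                  .groupby('SK_ID_CURR')['SK_ID_PREV'].count()
                  .rename('des').reset_index())
    for df in [train, test]:
        merged = df[['SK_ID_CURR']].merge(tmp, on='SK_ID_CURR', how='left')
        df['count_late_payment_' + str(i)] = merged['des'].fillna(0)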
scaler = StandardScaler()
features = scaler.fit_transform(train)
features.shape<prepare_x_and_y>
install['tmp'] = install['AMT_PAYMENT'] / install['AMT_INSTALMENT']
for i in range(10):
    print(i)
    tmp = install[(install['tmp'] > i / 10) & (install['tmp'] < ((i + 1) / 10))]
    tmp = tmp.groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index()
    tmp.columns = ['SK_ID_CURR', 'des']
    for df in [train, test]:
        tmp_merge = df[['SK_ID_CURR']]
        tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
        df['count_ratio_payment_' + str(i)] = tmp_merge['des'].fillna(0)
Home Credit Default Risk
1,511,034
X_train, y_train = features, labels.label<normalization>
tmp = install.groupby(['SK_ID_PREV', 'NUM_INSTALMENT_NUMBER'])['DAYS_INSTALMENT'].count().reset_index()
tmp = tmp[tmp['DAYS_INSTALMENT'] > 1]
tmp.columns = ['SK_ID_PREV', 'NUM_INSTALMENT_NUMBER', 'count_dup']
install = install.merge(tmp, on=['SK_ID_PREV', 'NUM_INSTALMENT_NUMBER'], how='left')
dup_install = install[install['count_dup'] > 1]
dup_install.reset_index(drop=True, inplace=True)
Home Credit Default Risk
1,511,034
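The duplicate-instalment flag above can also be written with `groupby(...).transform('count')`, which avoids the intermediate merge. An equivalent sketch (my own; it fills `count_dup` for every row rather than only the duplicated ones, but selects the same `dup_install` rows):

install['count_dup'] = install.groupby(['SK_ID_PREV', 'NUM_INSTALMENT_NUMBER'])['DAYS_INSTALMENT'].transform('count')
dup_install = install[install['count_dup'] > 1].reset_index(drop=True)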
y_test = test.set_index('person')
X_test = scaler.transform(full_train.loc[y_test.index])<train_model>
# This filtered frame and its ratio are computed but never used in the source;
# kept here for fidelity.
tmp = install[(install['AMT_PAYMENT'] < install['AMT_INSTALMENT']) & (install['DAYS_ENTRY_PAYMENT'] < install['DAYS_INSTALMENT'])]
tmp['ratio'] = tmp['AMT_PAYMENT'] / tmp['AMT_INSTALMENT']
tmp = dup_install.groupby(['SK_ID_CURR'])['AMT_PAYMENT'].agg(['min', 'max', 'mean', 'sum']).reset_index()
# The source renamed and merged the stale `tmp_1` from an earlier cell here;
# using `tmp` (the aggregates just computed) matches the evident intent.
tmp.columns = ['SK_ID_CURR', 'des1', 'des2', 'des3', 'des4']
for df in [train, test]:
    tmp_merge = df[['SK_ID_CURR']]
    tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left')
    df['min_special_install'] = tmp_merge['des1']
    df['max_special_install'] = tmp_merge['des2']
    df['mean_special_install'] = tmp_merge['des3']
    df['sum_special_install'] = tmp_merge['des4']
Home Credit Default Risk
1,511,034
<train_model>
dup_install.sort_values(by=['SK_ID_PREV', 'NUM_INSTALMENT_NUMBER', 'DAYS_ENTRY_PAYMENT'])
Home Credit Default Risk
1,511,034
lgb_model = lgb.LGBMClassifier(boosting_type='dart', num_leaves=5, n_estimators=1000, metric='AUC', learning_rate=0.05, colsample_bytree=0.9)
lgb_model.fit(X_train, y_train)<compute_test_metric>
credit = pd.read_csv(".. /input/credit_card_balance.csv" )
Home Credit Default Risk
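The LightGBM model above is fitted on the full training matrix with no held-out evaluation. A sketch of how the `evaluate_model` helper defined earlier could score it on a validation split (my own addition; assumes `X_train`/`y_train` are the feature matrix and labels from the preceding cells):

from sklearn.model_selection import train_test_split

# Hold out 20% for validation, then fit and score with the earlier helper.
X_tr, X_val, y_tr, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=0)
lgb_model.fit(X_tr, y_tr)
evaluate_model(y_val, model=lgb_model, X_test=X_val)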