kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
13,964,440
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True) prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True) prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True) prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True) prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True )<feature_engineering>
sub_df['target'] = sub_df.drop(['id', 'target'], axis=1 ).mean(axis=1) sub_df['target'] = sub_df['target'] sub_df = sub_df[['id', 'target']] sub_df.head(3 )
Tabular Playground Series - Jan 2021
13,964,440
<define_variables><EOS>
sub_df.to_csv('submission.csv', index=False )
Tabular Playground Series - Jan 2021
13,961,531
<SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<merge>
%matplotlib inline
Tabular Playground Series - Jan 2021
13,961,531
agg_prev = group(prev, 'PREV_', {**PREVIOUS_AGG, **categorical_agg}) agg_prev = agg_prev.merge(active_agg_df, how='left', on='SK_ID_CURR') del active_agg_df; gc.collect()<merge>
train = pd.read_csv(DATA / "train.csv") test = pd.read_csv(DATA / "test.csv") smpl_sub = pd.read_csv(DATA / "sample_submission.csv") print("train: {}, test: {}, sample sub: {}".format( train.shape, test.shape, smpl_sub.shape ))
Tabular Playground Series - Jan 2021
13,961,531
agg_prev = group_and_merge(approved, agg_prev, 'APPROVED_', PREVIOUS_APPROVED_AGG) refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1] agg_prev = group_and_merge(refused, agg_prev, 'REFUSED_', PREVIOUS_REFUSED_AGG) del approved, refused; gc.collect()<merge>
class TreeModel: def __init__(self, model_type: str): self.model_type = model_type self.trn_data = None self.val_data = None self.model = None def train(self, params: dict, X_train: pd.DataFrame, y_train: np.ndarray, X_val: pd.DataFrame, y_val: np.ndarray, train_weight: tp.Optional[np.ndarray] = None, val_weight: tp.Optional[np.ndarray] = None, train_params: dict = {}): if self.model_type == "lgb": self.trn_data = lgb.Dataset(X_train, label=y_train, weight=train_weight) self.val_data = lgb.Dataset(X_val, label=y_val, weight=val_weight) self.model = lgb.train(params=params, train_set=self.trn_data, valid_sets=[self.trn_data, self.val_data], **train_params) elif self.model_type == "xgb": self.trn_data = xgb.DMatrix(X_train, y_train, weight=train_weight) self.val_data = xgb.DMatrix(X_val, y_val, weight=val_weight) self.model = xgb.train(params=params, dtrain=self.trn_data, evals=[(self.trn_data, "train"),(self.val_data, "val")], **train_params) elif self.model_type == "cat": self.trn_data = Pool(X_train, label=y_train, group_id=[0] * len(X_train)) self.val_data = Pool(X_val, label=y_val, group_id=[0] * len(X_val)) self.model = CatBoost(params) self.model.fit( self.trn_data, eval_set=[self.val_data], use_best_model=True, **train_params) else: raise NotImplementedError def predict(self, X: pd.DataFrame): if self.model_type == "lgb": return self.model.predict( X, num_iteration=self.model.best_iteration) elif self.model_type == "xgb": X_DM = xgb.DMatrix(X) return self.model.predict( X_DM, ntree_limit=self.model.best_ntree_limit) elif self.model_type == "cat": return self.model.predict(X) else: raise NotImplementedError @property def feature_names_(self): if self.model_type == "lgb": return self.model.feature_name() elif self.model_type == "xgb": return list(self.model.get_score(importance_type="gain" ).keys()) elif self.model_type == "cat": return self.model.feature_names_ else: raise NotImplementedError @property def feature_importances_(self): if self.model_type == 
"lgb": return self.model.feature_importance(importance_type="gain") elif self.model_type == "xgb": return list(self.model.get_score(importance_type="gain" ).values()) elif self.model_type == "cat": return self.model.feature_importances_ else: raise NotImplementedError
Tabular Playground Series - Jan 2021
13,961,531
for loan_type in ['Consumer loans', 'Cash loans']: type_df = prev[prev['NAME_CONTRACT_TYPE_{}'.format(loan_type)] == 1] prefix = 'PREV_' + loan_type.split(" ")[0] + '_' agg_prev = group_and_merge(type_df, agg_prev, prefix, PREVIOUS_LOAN_TYPE_AGG) del type_df; gc.collect()<feature_engineering>
ID_COL = "id" FEAT_COLS = [f"cont{i}" for i in range(1, 15)] TGT_COL = "target" N_SPLITS =10 RANDOM_SEED = 2021 USE_MODEL = "lgb" MODEL_PARAMS = { "objective": "root_mean_squared_error", "boosting": "gbdt", "learning_rate": 0.01, "seed": RANDOM_SEED, 'max_depth': -1, 'colsample_bytree':.85, "subsample":.85, "n_jobs": 2, } TRAIN_PARAMS = { "num_boost_round": 20000, "early_stopping_rounds": 300, "verbose_eval": 100, }
Tabular Playground Series - Jan 2021
13,961,531
pay['LATE_PAYMENT'] = pay['DAYS_ENTRY_PAYMENT'] - pay['DAYS_INSTALMENT'] pay['LATE_PAYMENT'] = pay['LATE_PAYMENT'].apply(lambda x: 1 if x > 0 else 0) dpd_id = pay[pay['LATE_PAYMENT'] > 0]['SK_ID_PREV'].unique()<merge>
X = train[FEAT_COLS] X_test = test[FEAT_COLS] y = train[TGT_COL].values kf = KFold(n_splits=N_SPLITS, shuffle=True, random_state=RANDOM_SEED) trn_val_indexs = list(kf.split(X, y))
Tabular Playground Series - Jan 2021
13,961,531
agg_dpd = group_and_merge(prev[prev['SK_ID_PREV'].isin(dpd_id)], agg_prev, 'PREV_LATE_', PREVIOUS_LATE_PAYMENTS_AGG) del agg_dpd, dpd_id; gc.collect()<merge>
oof_pred_arr = np.zeros(len(X)) test_preds_arr = np.zeros(( N_SPLITS, len(X_test))) feature_importances = pd.DataFrame() score_list = []
Tabular Playground Series - Jan 2021
13,961,531
df = pd.merge(df, agg_prev, on='SK_ID_CURR', how='left' )<split>
for fold,(trn_idx, val_idx)in enumerate(trn_val_indexs): print("*" * 100) print(f"Fold: {fold}") X_trn = X.loc[trn_idx].reset_index(drop=True) X_val = X.loc[val_idx].reset_index(drop=True) y_trn = y[trn_idx] y_val = y[val_idx] model = TreeModel(model_type=USE_MODEL) with timer(prefix="Model training"): model.train( params=MODEL_PARAMS, X_train=X_trn, y_train=y_trn, X_val=X_val, y_val=y_val, train_params=TRAIN_PARAMS) fi_tmp = pd.DataFrame() fi_tmp["feature"] = model.feature_names_ fi_tmp["importance"] = model.feature_importances_ fi_tmp["fold"] = fold feature_importances = feature_importances.append(fi_tmp) val_pred = model.predict(X_val) score = mean_squared_error(y_val, val_pred, squared=False) print(f"score: {score:.5f}") score_list.append([fold, score]) oof_pred_arr[val_idx] = val_pred test_pred = model.predict(X_test) test_preds_arr[fold] = test_pred
Tabular Playground Series - Jan 2021
13,961,531
train = df[df['TARGET'].notnull() ] test = df[df['TARGET'].isnull() ] del df del agg_prev gc.collect()<drop_column>
oof_score = mean_squared_error(y, oof_pred_arr, squared=False) score_list.append(["oof", oof_score]) pd.DataFrame( score_list, columns=["fold", "rmse score"] )
Tabular Playground Series - Jan 2021
13,961,531
<categorify><EOS>
sub = smpl_sub.copy() sub[TGT_COL] = test_preds_arr.mean(axis=0) sub.to_csv("submission.csv", index=False) sub.head()
Tabular Playground Series - Jan 2021
14,687,644
<SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<categorify>
%matplotlib inline dfk = pd.DataFrame({ 'Kernel ID': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'], 'Score': [ 0.69864 , 0.69846 , 0.69836 , 0.69824 , 0.69813, 0.69795, 0.69752, 0.69751, 0.69749, 0.69747, 0.69735, 0.69731, 0.69703, 0.69701, 0.69691, 0.69648, 0.69579], 'File Path': ['.. /input/aa69864/AA69864.csv', '.. /input/bb69846/BB69846.csv', '.. /input/cc69836/CC69836.csv', '.. /input/a69824/A69824.csv', '.. /input/c69813/C69813.csv', '.. /input/ff69795/FF69795.csv', '.. /input/ggg69752/GGG69752.csv', '.. /input/gg69751/GG69751.csv' , '.. /input/g69749/G69749.csv', '.. /input/h69747/H69747.csv', '.. /input/i69735/I69735.csv', '.. /input/j69731/J69731.csv', '.. /input/mmm69703/MMM69703.csv', '.. /input/l69701/L69701.csv', '.. /input/ooo69691/OOO69691.csv', '.. /input/ppp69648/PPP69648.csv', '.. /input/jan21-tabular-playground-4-lb-final-blend/submission.csv'] }) dfk
Tabular Playground Series - Jan 2021
14,687,644
imputer = SimpleImputer(strategy = 'median') imputer.fit(train) imputer.fit(test) train1 = imputer.transform(train) test1 = imputer.transform(test) del train del test gc.collect()<normalization>
def generate(main, support, coeff): g = main.copy() for i in main.columns[1:]: res = [] lm, Is = [], [] lm = main[i].tolist() ls = support[i].tolist() for j in range(len(main)) : res.append(( lm[j] * coeff)+(ls[j] *(1.- coeff))) g[i] = res return g
Tabular Playground Series - Jan 2021
14,687,644
scaler = MinMaxScaler(feature_range =(0, 1)) scaler.fit(train1) scaler.fit(test1) train = scaler.transform(train1) test = scaler.transform(test1) del train1 del test1 gc.collect()<import_modules>
support = pd.read_csv(dfk.iloc[0, 2]) for k in range(1, 11): main = pd.read_csv(dfk.iloc[k, 2]) support = generate(main, support, 0.60) sub1 = support
Tabular Playground Series - Jan 2021
14,687,644
from keras.models import Sequential from keras.layers import Dense<train_model>
sub8 = comparison(sub7, 8, 1.0030, 0.9980)
Tabular Playground Series - Jan 2021
14,687,644
<predict_on_test><EOS>
sub = sub8 sub.to_csv("submission.csv", index=False) sub1.to_csv("submission1.csv", index=False) sub2.to_csv("submission2.csv", index=False) sub3.to_csv("submission3.csv", index=False) sub4.to_csv("submission4.csv", index=False) sub5.to_csv("submission5.csv", index=False) sub6.to_csv("submission6.csv", index=False) sub7.to_csv("submission7.csv", index=False) !ls
Tabular Playground Series - Jan 2021
14,587,054
<SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<save_to_csv>
import pandas as pd import numpy as np import lightgbm as lgbm from sklearn.model_selection import KFold from sklearn.metrics import mean_squared_error
Tabular Playground Series - Jan 2021
14,587,054
submit = test_df[['SK_ID_CURR']] submit['TARGET'] = pred submit.to_csv('NN.csv', index = False )<set_options>
train = pd.read_csv(".. /input/tabular-playground-series-jan-2021/train.csv") test = pd.read_csv(".. /input/tabular-playground-series-jan-2021/test.csv" )
Tabular Playground Series - Jan 2021
14,587,054
%matplotlib inline warnings.filterwarnings('ignore') pd.set_option('display.max_rows', 100) pd.set_option('display.max_columns', 200 )<load_from_csv>
cont_features = [col for col in train.columns if col.startswith("cont")] len(cont_features )
Tabular Playground Series - Jan 2021
14,587,054
app_train = pd.read_csv('.. /input/home-credit-default-risk/application_train.csv') app_test = pd.read_csv('.. /input/home-credit-default-risk/application_test.csv' )<train_model>
y = train["target"] kf = KFold(n_splits=5, shuffle=True, random_state=1) oof = np.zeros(len(train)) score_list = [] fold = 1 test_preds = [] seed_list = [None,2,3] for train_index, test_index in kf.split(train): X_train, X_val = train.iloc[train_index], train.iloc[test_index] y_train, y_val = y.iloc[train_index], y.iloc[test_index] X_train = X_train.abs() y_pred_list = [] for seed in seed_list: dtrain = lgbm.Dataset(X_train[cont_features], y_train) dvalid = lgbm.Dataset(X_val[cont_features], y_val) print(seed) params = {"objective": "regression", "metric": "rmse", "verbosity": -1, "boosting_type": "gbdt", "feature_fraction":0.5, "num_leaves": 250, "lambda_l1":7, "lambda_l2":2, "learning_rate":0.01, 'min_child_samples': 35, "bagging_fraction":0.75, "bagging_freq":1, } params["seed"] = seed model = lgbm.train(params, dtrain, valid_sets=[dtrain, dvalid], verbose_eval=100, num_boost_round=100000, early_stopping_rounds=100 ) dtrain = lgbm.Dataset(X_train[cont_features], y_train) dvalid = lgbm.Dataset(X_val[cont_features], y_val) params = {"objective": "regression", "metric": "rmse", "verbosity": -1, "boosting_type": "gbdt", "feature_fraction":0.5, "num_leaves": 350, "lambda_l1":7, "lambda_l2":1, "learning_rate":0.003, 'min_child_samples': 35, "bagging_fraction":0.8, "bagging_freq":1, } params["seed"] = seed model = lgbm.train(params, dtrain, valid_sets=[dtrain, dvalid], verbose_eval=100, num_boost_round=100000, early_stopping_rounds=200, init_model = model ) y_pred_list.append(model.predict(X_val[cont_features])) print(np.sqrt(mean_squared_error(y_val, np.mean(y_pred_list,axis=0)))) test_preds.append(model.predict(test[cont_features])) oof[test_index] = np.mean(y_pred_list,axis=0) score = np.sqrt(mean_squared_error(y_val, oof[test_index])) score_list.append(score) print(f"RMSE Fold-{fold} : {score}") fold+=1 np.mean(score_list )
Tabular Playground Series - Jan 2021
14,587,054
print('Training data shape: ', app_train.shape) app_train.head() <count_values>
print(score_list) np.mean(score_list )
Tabular Playground Series - Jan 2021
14,587,054
app_train['TARGET'].value_counts()<count_values>
train["1_preds"] = oof test["1_preds"] = np.mean(test_preds,axis=0 )
Tabular Playground Series - Jan 2021
14,587,054
app_train.dtypes.value_counts() <define_variables>
threshold = 8 train["target_class"] = train["target"].apply(lambda x: 1 if x > threshold else 0) train.groupby("target_class")["target"].mean()
Tabular Playground Series - Jan 2021
14,587,054
np.linspace(20,70,num=11 )<feature_engineering>
y = train["target_class"] kf = KFold(n_splits=5, shuffle=True, random_state=1) oof = np.zeros(len(train)) score_list = [] fold = 1 test_preds = [] for train_index, test_index in kf.split(train): X_train, X_val = train.iloc[train_index], train.iloc[test_index] y_train, y_val = y.iloc[train_index], y.iloc[test_index] dtrain_0 = lgbm.Dataset(X_train[cont_features], y_train) dvalid = lgbm.Dataset(X_val[cont_features], y_val) params = {"objective": "binary", "metric": "auc", "verbosity": -1, "boosting_type": "gbdt", "feature_fraction":0.5, "num_leaves": 200, "lambda_l1":7, "lambda_l2":0, "learning_rate":0.01, 'min_child_samples': 0, "bagging_fraction":0.75, "bagging_freq":1, } model_0 = lgbm.train(params, dtrain_0, valid_sets=[dtrain_0, dvalid], verbose_eval=100, num_boost_round=100000, early_stopping_rounds=100 ) y_pred_0 = model_0.predict(X_val[cont_features]) score = roc_auc_score(y_val,y_pred_0) print(f"AUC Fold-{fold} : {score}") oof[test_index] = y_pred_0 score_list.append(score) test_preds.append(model_0.predict(test[cont_features])) fold+=1 np.mean(score_list )
Tabular Playground Series - Jan 2021
14,587,054
age_data=app_train[['TARGET','DAYS_BIRTH']] age_data['DAYS_BIRTH']=-age_data['DAYS_BIRTH'] age_data['YEARS_BIRTH']=age_data['DAYS_BIRTH']/365 age_data['YEARS_BINNED']=pd.cut(age_data['YEARS_BIRTH'],bins=np.linspace(20,70,num=11)) age_data.head(10 )<groupby>
train["class_preds"] = oof test["class_preds"] = np.mean(test_preds,axis=0) roc_auc_score(train["target_class"],train["class_preds"] )
Tabular Playground Series - Jan 2021
14,587,054
age_groups = age_data.groupby('YEARS_BINNED' ).mean() age_groups<filter>
y = train["target"] kf = KFold(n_splits=5, shuffle=True, random_state=1) oof_2 = np.zeros(len(train)) score_list = [] fold = 1 test_preds_2 = [] for train_index, test_index in kf.split(train): X_train, X_val = train.iloc[train_index], train.iloc[test_index] y_train, y_val = y.iloc[train_index], y.iloc[test_index] X_train_0 = X_train[X_train.target_class==0] y_train_0 = y_train[X_train.target_class==0] X_train_1 = X_train[X_train.target_class==1] y_train_1 = y_train[X_train.target_class==1] dtrain_0 = lgbm.Dataset(X_train_0[cont_features], y_train_0) dvalid = lgbm.Dataset(X_val[cont_features], y_val) params = {"objective": "regression", "metric": "rmse", "verbosity": -1, "boosting_type": "gbdt", "feature_fraction":0.5, "num_leaves": 250, "lambda_l1":4, "lambda_l2":2, "learning_rate":0.01, 'min_child_samples': 35, "bagging_fraction":0.75, "bagging_freq":1, } model_0 = lgbm.train(params, dtrain_0, valid_sets=[dtrain_0, dvalid], verbose_eval=100, num_boost_round=100000, early_stopping_rounds=100 ) y_pred_0 = model_0.predict(X_val[cont_features]) score = np.sqrt(mean_squared_error(y_val, y_pred_0)) print(f"RMSE Fold-{fold} : {score}") dtrain_1 = lgbm.Dataset(X_train_1[cont_features], y_train_1) dvalid = lgbm.Dataset(X_val[cont_features], y_val) params = {"objective": "regression", "metric": "rmse", "verbosity": -1, "boosting_type": "gbdt", "feature_fraction":0.5, "num_leaves": 250, "lambda_l1":4, "lambda_l2":2, "learning_rate":0.01, 'min_child_samples': 35, "bagging_fraction":0.75, "bagging_freq":1, } model_1 = lgbm.train(params, dtrain_1, valid_sets=[dtrain_1, dvalid], verbose_eval=100, num_boost_round=100000, early_stopping_rounds=100 ) y_pred_1 = model_1.predict(X_val[cont_features]) score = np.sqrt(mean_squared_error(y_val, y_pred_1)) print(f"RMSE Fold-{fold} : {score}") oof_2[test_index] = y_pred_0*(1-X_val["class_preds"])+ y_pred_1*(X_val["class_preds"]) score = np.sqrt(mean_squared_error(y_val,oof_2[test_index])) print(f"RMSE Fold-Final-{fold} : {score}") 
score_list.append(score) test_preds_2.append(model_0.predict(test[cont_features])*(1-test["class_preds"])+ model_1.predict(test[cont_features])*(test["class_preds"])) fold+=1 np.mean(score_list )
Tabular Playground Series - Jan 2021
14,587,054
anom = app_train[app_train['DAYS_EMPLOYED'] == 365243] non_anom = app_train[app_train['DAYS_EMPLOYED'] != 365243] print('이상값이 아닌 data의 target 평균: %0.2f%%' %(100 * non_anom['TARGET'].mean())) print('이상값인 data의 target 평균: %0.2f%%' %(100 * anom['TARGET'].mean())) <feature_engineering>
print(np.mean(score_list)) score_list
Tabular Playground Series - Jan 2021
14,587,054
app_test['DAYS_EMPLOYED_ANOM']=app_test['DAYS_EMPLOYED']==365243 app_test['DAYS_EMPLOYED'].replace({365243:np.nan}, inplace=True) print('%d 개의 data 중에 testing data에서 %d 개의 이상값이 있다.'%(len(app_test),app_test['DAYS_EMPLOYED_ANOM'].sum()))<feature_engineering>
train["2_preds"] = oof_2 test["2_preds"] = np.mean(test_preds_2,axis=0) np.sqrt(mean_squared_error(train["target"],train["2_preds"]))
Tabular Playground Series - Jan 2021
14,587,054
app_train['DAYS_BIRTH'] = app_train['DAYS_BIRTH'] / -365 app_test['DAYS_BIRTH'] = app_test['DAYS_BIRTH'] / -365 ext=app_train[['TARGET','EXT_SOURCE_1','EXT_SOURCE_2','EXT_SOURCE_3','DAYS_BIRTH']] extcorr = ext.corr() extcorr<count_values>
from tensorflow.keras.layers import Input,Dense,Dropout from tensorflow.keras import Model from tensorflow.keras.regularizers import l2 import tensorflow as tf
Tabular Playground Series - Jan 2021
14,587,054
app_train.dtypes.value_counts() <data_type_conversions>
def get_DAE() : inputs = Input(( 14,)) x = Dense(500, activation='relu' )(inputs) x = Dense(500, activation='relu', name="feature" )(x) x = Dense(500, activation='relu' )(x) outputs = Dense(14, activation='relu' )(x) model = Model(inputs=inputs, outputs=outputs) model.compile(optimizer='adam', loss='mse') return model
Tabular Playground Series - Jan 2021
14,587,054
object_columns = app_train.dtypes[app_train.dtypes == 'object'].index.tolist() object_columns<count_values>
alldata = pd.concat([train[cont_features],test[cont_features]],axis=0) print(alldata.shape) autoencoder = get_DAE() autoencoder.fit(alldata[cont_features], alldata[cont_features], epochs=15, batch_size=256, shuffle=True )
Tabular Playground Series - Jan 2021
14,587,054
cond_1 =(app_train['TARGET'] == 1) cond_0 =(app_train['TARGET'] == 0) for a in obj: print(a) print(' 연체인 경우 ',app_train[cond_1][a].value_counts() /app_train[cond_1].shape[0]) print(' 연체가 아닌 경우 ',app_train[cond_0][a].value_counts() /app_train[cond_0].shape[0]) print('----------------------------' )<categorify>
test_denoised = test.copy() test_denoised[cont_features] = autoencoder.predict(test_denoised[cont_features]) train_denoised = train.copy() train_denoised[cont_features] = autoencoder.predict(train_denoised[cont_features] )
Tabular Playground Series - Jan 2021
14,587,054
def missing_values_table(df): miss = df.isnull().sum() miss_percent = 100 * miss / len(df) mis_table = pd.concat([miss, miss_percent], axis=1) mis_val_table = mis_table.rename( columns = {0 : 'Missing Values', 1 : '% of Total Values'}) mis_val_table = mis_val_table[ mis_val_table.iloc[:,1] != 0].sort_values( '% of Total Values', ascending=False ).round(1) print("선택된 데이터프레임은 " + str(df.shape[1])+ "개의 컬럼이 있다. " "그중에서 " + str(mis_val_table.shape[0])+ " 개는 결측값이 있는 컬럼이다.") return mis_val_table<count_missing_values>
y = train["target"] kf = KFold(n_splits=5, shuffle=True, random_state=1) oof_3 = np.zeros(len(train)) score_list = [] fold = 1 test_preds_3 = [] seed_list = [None,2,3] for train_index, test_index in kf.split(train): X_train, X_val = train.iloc[train_index], train.iloc[test_index] y_train, y_val = y.iloc[train_index], y.iloc[test_index] X_train_denoised, X_val_denoised = train_denoised.iloc[train_index], train_denoised.iloc[test_index] print(X_train_denoised.shape) X_train_denoised = pd.concat([X_train_denoised,X_train],axis=0) y_train = pd.concat([y_train,y_train],axis=0) print(X_train_denoised.shape) y_pred_list = [] for seed in seed_list: dtrain = lgbm.Dataset(X_train_denoised[cont_features], y_train) dvalid = lgbm.Dataset(X_val[cont_features], y_val) params = {"objective": "regression", "metric": "rmse", "verbosity": -1, "boosting_type": "gbdt", "feature_fraction":0.5, "num_leaves": 250, "lambda_l1":7, "lambda_l2":2, "learning_rate":0.01, 'min_child_samples': 35, "bagging_fraction":0.75, "bagging_freq":1, } params["seed"] = seed model = lgbm.train(params, dtrain, valid_sets=[dtrain, dvalid], verbose_eval=100, num_boost_round=100000, early_stopping_rounds=100 ) dtrain = lgbm.Dataset(X_train_denoised[cont_features], y_train) dvalid = lgbm.Dataset(X_val[cont_features], y_val) params = {"objective": "regression", "metric": "rmse", "verbosity": -1, "boosting_type": "gbdt", "feature_fraction":0.5, "num_leaves": 350, "lambda_l1":7, "lambda_l2":1, "learning_rate":0.003, 'min_child_samples': 35, "bagging_fraction":0.8, "bagging_freq":1, } params["seed"] = seed model = lgbm.train(params, dtrain, valid_sets=[dtrain, dvalid], verbose_eval=100, num_boost_round=100000, early_stopping_rounds=200, init_model = model ) y_pred_list.append(model.predict(X_val[cont_features])) print(np.sqrt(mean_squared_error(y_val, np.mean(y_pred_list,axis=0)))) test_preds_3.append(model.predict(test[cont_features])) oof_3[test_index] = np.mean(y_pred_list,axis=0) score = 
np.sqrt(mean_squared_error(y_val, oof_3[test_index])) score_list.append(score) print(f"RMSE Fold-{fold} : {score}") fold+=1 np.mean(score_list )
Tabular Playground Series - Jan 2021
14,587,054
missing_values = missing_values_table(app_train) missing_values.head(20 )<concatenate>
print(np.mean(score_list)) score_list
Tabular Playground Series - Jan 2021
14,587,054
apps = pd.concat([app_train,app_test]) print(apps.shape )<count_values>
train["3_preds"] = oof_3 test["3_preds"] = np.mean(test_preds_3,axis=0 )
Tabular Playground Series - Jan 2021
14,587,054
apps['TARGET'].value_counts(dropna=False )<feature_engineering>
train[["id","1_preds","2_preds","3_preds"]].to_csv("train_preds.csv",index=False) test[["id","1_preds","2_preds","3_preds"]].to_csv("test_preds.csv",index=False )
Tabular Playground Series - Jan 2021
14,587,054
object_col = apps.dtypes[apps.dtypes == 'object'].index.tolist() for column in object_col: apps[column] = pd.factorize(apps[column])[0]<feature_engineering>
train["4_preds"] = pd.read_csv(".. /input/nn-with-categorical-embedding/nn_preds_train.csv")["4_preds"] test["4_preds"] = pd.read_csv(".. /input/nn-with-categorical-embedding/nn_preds_test.csv")["4_preds"]
Tabular Playground Series - Jan 2021
14,587,054
apps['CREDIT_INCOME_PERCENT'] = apps['AMT_CREDIT'] / apps['AMT_INCOME_TOTAL'] apps['ANNUITY_INCOME_PERCENT'] = apps['AMT_ANNUITY'] / apps['AMT_INCOME_TOTAL'] apps['CREDIT_TERM'] = apps['AMT_ANNUITY'] / apps['AMT_CREDIT'] apps['GOODS_CREDIT_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_CREDIT'] apps['CREDIT_GOODS_DIFF'] = apps['AMT_CREDIT'] - apps['AMT_GOODS_PRICE'] apps['GOODS_INCOME_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_INCOME_TOTAL']<feature_engineering>
print("1st model score: ",np.sqrt(mean_squared_error(train["target"], train["1_preds"]))) print("2nd model score: ",np.sqrt(mean_squared_error(train["target"], train["2_preds"]))) print("3rd model score: ",np.sqrt(mean_squared_error(train["target"], train["3_preds"]))) print("4th model score: ",np.sqrt(mean_squared_error(train["target"], train["4_preds"]))) print("Blending model score: ",np.sqrt(mean_squared_error(train["target"], train["4_preds"]*0.1+train["3_preds"]*0.3+train["2_preds"]*0.3+train["1_preds"]*0.3)) )
Tabular Playground Series - Jan 2021
14,587,054
apps['APPS_EXT_SOURCE_MEAN'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1) apps['APPS_EXT_SOURCE_STD'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1 )<feature_engineering>
input_df = train[["1_preds","2_preds","3_preds","4_preds"]].copy() sub_input_df = test[["1_preds","2_preds","3_preds","4_preds"]].copy()
Tabular Playground Series - Jan 2021
14,587,054
apps['APPS_EXT_SOURCE_STD'] = apps['APPS_EXT_SOURCE_STD'].fillna(apps['APPS_EXT_SOURCE_STD'].mean() )<feature_engineering>
y = train["target"] kf = KFold(n_splits=5, shuffle=True, random_state=1) oof_stack = np.zeros(len(train)) score_list= [] fold = 1 test_preds_stack = [] for train_index, test_index in kf.split(input_df): X_train, X_val = input_df.iloc[train_index], input_df.iloc[test_index] y_train, y_val = y.iloc[train_index], y.iloc[test_index] reg = LinearRegression(fit_intercept=True ).fit(X_train, y_train) y_stack = reg.predict(X_val) oof_stack[test_index] = y_stack*1 score = np.sqrt(mean_squared_error(y_val, oof_stack[test_index])) score_list.append(score) test_preds_stack.append(reg.predict(sub_input_df.values ).ravel()) np.sqrt(mean_squared_error(y, oof_stack))
Tabular Playground Series - Jan 2021
14,587,054
apps['EMPLOYED_BIRTH_RATIO'] = apps['DAYS_EMPLOYED']/apps['DAYS_BIRTH'] apps['INCOME_EMPLOYED_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_EMPLOYED'] apps['INCOME_BIRTH_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_BIRTH'] apps['CAR_BIRTH_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_BIRTH'] apps['CAR_EMPLOYED_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_EMPLOYED']<load_from_csv>
print(np.mean(score_list)) score_list
Tabular Playground Series - Jan 2021
14,587,054
ccb = pd.read_csv('.. /input/home-credit-default-risk/credit_card_balance.csv' )<merge>
def final_features(df): combinations_list = [("1_preds","2_preds"), ("1_preds","3_preds"), ("1_preds","4_preds"), ("2_preds","3_preds"), ("2_preds","4_preds"), ("3_preds","4_preds")] for i in range(len(combinations_list)) : col1 = combinations_list[i][0] col2 = combinations_list[i][1] df.loc[:,f"{col1}_{col2}_multiply"] = df[col1] * df[col2] df.loc[:,f"{col1}_{col2}_divide"] = df[col1] / df[col2] combinations_list = [("1_preds","2_preds","3_preds"), ("1_preds","2_preds","4_preds"), ("1_preds","3_preds","4_preds"), ("2_preds","3_preds","4_preds")] for i in range(len(combinations_list)) : col1 = combinations_list[i][0] col2 = combinations_list[i][1] col3 = combinations_list[i][2] df.loc[:,f"{col1}_{col2}_{col3}_multiply"] = df[col1] * df[col2]* df[col3] return df input_df = train[["1_preds","2_preds","3_preds","4_preds"]].copy() sub_input_df = test[["1_preds","2_preds","3_preds","4_preds"]].copy() input_df = final_features(input_df) sub_input_df = final_features(sub_input_df) input_df.head()
Tabular Playground Series - Jan 2021
14,587,054
app_ccb = ccb.merge(app_train, left_on='SK_ID_CURR', right_on='SK_ID_CURR', how='outer') app_ccb.shape<count_missing_values>
y = train["target"] kf = KFold(n_splits=5, shuffle=True, random_state=1) oof_stack = np.zeros(len(train)) score_list= [] fold = 1 test_preds_stack = [] for train_index, test_index in kf.split(input_df): X_train, X_val = input_df.iloc[train_index], input_df.iloc[test_index] y_train, y_val = y.iloc[train_index], y.iloc[test_index] reg = LinearRegression(fit_intercept=True ).fit(X_train, y_train) y_stack = reg.predict(X_val) oof_stack[test_index] = y_stack*1 score = np.sqrt(mean_squared_error(y_val, oof_stack[test_index])) score_list.append(score) test_preds_stack.append(reg.predict(sub_input_df.values ).ravel()) np.sqrt(mean_squared_error(y, oof_stack))
Tabular Playground Series - Jan 2021
14,587,054
missing_values = missing_values_table(ccb) missing_values.head(20 )<groupby>
print(np.mean(score_list)) score_list
Tabular Playground Series - Jan 2021
14,587,054
app_ccb.groupby('SK_ID_CURR' ).count()<groupby>
train["stacked_preds"] = oof_stack test["target"] = np.mean(test_preds_stack,axis=0 )
Tabular Playground Series - Jan 2021
14,587,054
<merge><EOS>
train[["id","stacked_preds"]].to_csv("tarin_stacked_oof.csv",index=False) test[["id","target"]].to_csv("submission.csv",index=False )
Tabular Playground Series - Jan 2021
14,412,255
<SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<define_variables>
import pandas as pd import numpy as np from sklearn.ensemble import StackingRegressor from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.neural_network import MLPRegressor from lightgbm import LGBMRegressor from xgboost import XGBRegressor from catboost import CatBoostRegressor from vecstack import stacking from sklearn.metrics import mean_squared_error from sklearn.linear_model import Ridge, Lasso
Tabular Playground Series - Jan 2021
14,412,255
num_columns = app_ccb_target.dtypes[app_ccb_target.dtypes != 'object'].index.tolist()<define_variables>
train = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/train.csv") test = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/test.csv") sample_submission = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/sample_submission.csv" )
Tabular Playground Series - Jan 2021
14,412,255
num_columns = [column for column in num_columns if column not in ['SK_ID_PREV', 'SK_ID_CURR', 'TARGET']] num_columns<groupby>
train = train.drop('id', axis=1) test = test.drop('id', axis=1) X = train.drop('target', axis=1) y = train.target
Tabular Playground Series - Jan 2021
14,412,255
print(app_ccb_target.groupby('TARGET' ).agg({'AMT_BALANCE': ['mean', 'median', 'count','sum','max']})) print(app_ccb_target.groupby('TARGET' ).agg({'AMT_CREDIT_LIMIT_ACTUAL': ['mean', 'median', 'count','sum','max']})) print(app_ccb_target.groupby('TARGET' ).agg({'AMT_INST_MIN_REGULARITY': ['mean', 'median', 'count','sum','max']})) print(app_ccb_target.groupby('TARGET' ).agg({'CNT_INSTALMENT_MATURE_CUM': ['mean', 'median', 'count','sum','max']})) print(app_ccb_target.groupby('TARGET' ).agg({'AMT_INST_MIN_REGULARITY': ['mean', 'median', 'count','sum','max']})) print(app_ccb_target.groupby('TARGET' ).agg({'AMT_CREDIT_LIMIT_ACTUAL': ['mean', 'median', 'count','sum','max']})) <filter>
estimators = [ RandomForestRegressor(n_estimators=500), GradientBoostingRegressor(n_estimators=500), Ridge() , Lasso() , LGBMRegressor(bagging_fraction=0.82, bagging_freq=6, cat_smooth=1.0, feature_fraction=0.5, lambda_l1=1.075e-05, lambda_l2=2.05e-06, learning_rate=0.005, max_dept=-1, metric='rmse', min_data_in_leaf=100, min_data_per_group=5, min_gain_to_split=0.0, min_sum_hessian_in_leaf=0.001, n_estimators=500, num_leaves=246), XGBRegressor(colsample_bytree=0.5, alpha=0.01563, learning_rate=0.01, max_depth=15, min_child_weight=246, n_estimators=500, reg_lambda=0.003, subsample=0.7, metric_period=100, silent=1), CatBoostRegressor() ]
Tabular Playground Series - Jan 2021
14,412,255
ccb_amt_agg=ccb_amt_agg.reset_index() ccb_amt_agg<drop_column>
S_train, S_test = stacking(estimators, X, y, test, regression=True, metric=mean_squared_error, n_folds=3, shuffle=True, random_state=0, verbose=2 )
Tabular Playground Series - Jan 2021
14,412,255
ccb_amt_agg=ccb_amt_agg.drop(['CCB_SK_ID_CURR_COUNT'],axis=1) ccb_amt_agg<merge>
reg = LinearRegression() reg = reg.fit(S_train, y )
Tabular Playground Series - Jan 2021
14,412,255
<feature_engineering><EOS>
y_pred = reg.predict(S_test) sample_submission['target'] = y_pred sample_submission.to_csv("stacking_regressor_submission.csv", index=False )
Tabular Playground Series - Jan 2021
14,438,581
<SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<split>
import numpy as np import pandas as pd
Tabular Playground Series - Jan 2021
14,438,581
apps_train = apps[~apps['TARGET'].isnull() ] apps_test = apps[apps['TARGET'].isnull() ] apps.shape, apps_train.shape, apps_test.shape<split>
df_train = pd.read_csv(".. /input/tabular-playground-series-jan-2021/train.csv") df_test = pd.read_csv(".. /input/tabular-playground-series-jan-2021/test.csv" )
Tabular Playground Series - Jan 2021
14,438,581
ftr_app = apps_train.drop(['SK_ID_CURR', 'TARGET'], axis=1) target_app = apps_train['TARGET'] train_x, valid_x, train_y, valid_y = train_test_split(ftr_app, target_app, test_size=0.3, random_state=2020) train_x.shape, valid_x.shape<train_model>
x = df_train.iloc[:, 1:15].values print(x) y = df_train.iloc[:, -1].values
Tabular Playground Series - Jan 2021
14,438,581
clf = LGBMClassifier( n_jobs=-1, n_estimators=1000, learning_rate=0.02, num_leaves=32, subsample=0.8, max_depth=12, silent=-1, verbose=-1 ) clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)], eval_metric= 'auc', verbose= 100, early_stopping_rounds= 100 )<predict_on_test>
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=0 )
Tabular Playground Series - Jan 2021
14,438,581
preds = clf.predict_proba(apps_test.drop(['SK_ID_CURR', 'TARGET'], axis=1)) [:, 1 ]<save_to_csv>
from xgboost import XGBRegressor import lightgbm as ltb from sklearn.model_selection import GridSearchCV from sklearn import metrics from sklearn import model_selection
Tabular Playground Series - Jan 2021
14,438,581
apps_test['TARGET'] = preds apps_test[['SK_ID_CURR', 'TARGET']].to_csv('apps_baseline05.csv', index=False )<set_options>
XGB = XGBRegressor(max_depth=3,learning_rate=0.1,n_estimators=1000,reg_alpha=0.001,reg_lambda=0.000001,n_jobs=-1,min_child_weight=3) XGB.fit(X_train,y_train )
Tabular Playground Series - Jan 2021
14,438,581
%matplotlib inline warnings.filterwarnings('ignore') pd.set_option('display.max_rows', 100) pd.set_option('display.max_columns', 200 )<load_from_csv>
y_pred_xgb = XGB.predict(X_test )
Tabular Playground Series - Jan 2021
14,438,581
app_train = pd.read_csv('.. /input/home-credit-default-risk/application_train.csv') app_test = pd.read_csv('.. /input/home-credit-default-risk/application_test.csv' )<count_missing_values>
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred_xgb)) print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred_xgb)) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred_xgb)) )
Tabular Playground Series - Jan 2021
14,438,581
print(app_train.isnull().sum()) print("결측치 있는 컴럼 개수: ",sum(app_train.isnull().sum() !=0))<count_values>
lgbm = ltb.LGBMRegressor()
Tabular Playground Series - Jan 2021
14,438,581
app_train['TARGET'].value_counts()<define_variables>
param_grid = { "boosting_type": ['gbdt'], "num_leaves": [9, 19], "max_depth": [29], "learning_rate": [0.1, 0.15], "n_estimators": [1000], "subsample_for_bin": [200000], "objective": ["regression"], "min_child_weight": [0.01], "min_child_samples":[100, 200], "subsample":[1.0], "subsample_freq":[0], "colsample_bytree":[1.0], "reg_alpha":[0.0], "reg_lambda":[0.0] }
Tabular Playground Series - Jan 2021
14,438,581
columns = ['AMT_INCOME_TOTAL','AMT_CREDIT', 'AMT_ANNUITY', 'AMT_GOODS_PRICE', 'DAYS_BIRTH', 'DAYS_EMPLOYED', 'DAYS_ID_PUBLISH', 'DAYS_REGISTRATION', 'DAYS_LAST_PHONE_CHANGE', 'CNT_FAM_MEMBERS', 'REGION_RATING_CLIENT', 'EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'AMT_REQ_CREDIT_BUREAU_HOUR', 'AMT_REQ_CREDIT_BUREAU_DAY', 'AMT_REQ_CREDIT_BUREAU_WEEK', 'AMT_REQ_CREDIT_BUREAU_MON', 'AMT_REQ_CREDIT_BUREAU_QRT', 'AMT_REQ_CREDIT_BUREAU_YEAR'] show_hist_by_target(app_train, columns )<count_values>
model = model_selection.RandomizedSearchCV( estimator=lgbm, param_distributions=param_grid, n_iter=100, scoring="neg_root_mean_squared_error", verbose=10, n_jobs=-1, cv=5 )
Tabular Playground Series - Jan 2021
14,438,581
cond_1 =(app_train['TARGET'] == 1) cond_0 =(app_train['TARGET'] == 0) print('CODE_GENDER ') print(app_train['CODE_GENDER'].value_counts() /app_train.shape[0]) print(' 연체인 경우 ',app_train[cond_1]['CODE_GENDER'].value_counts() /app_train[cond_1].shape[0]) print(' 연체가 아닌 경우 ',app_train[cond_0]['CODE_GENDER'].value_counts() /app_train[cond_0].shape[0] )<count_values>
model.fit(X_train, y_train )
Tabular Playground Series - Jan 2021
14,438,581
app_train['DAYS_EMPLOYED'].value_counts()<count_values>
print(f"Best score: {model.best_score_}") print("Best parameters from the RandomSearchCV:") best_parameters = model.best_estimator_.get_params() for param_name in sorted(param_grid.keys()): print(f"\t{param_name}: {best_parameters[param_name]}" )
Tabular Playground Series - Jan 2021
14,438,581
app_train['DAYS_EMPLOYED'] = app_train['DAYS_EMPLOYED'].replace(365243, np.nan) app_train['DAYS_EMPLOYED'].value_counts(dropna=False )<count_values>
best_model = model.best_estimator_
Tabular Playground Series - Jan 2021
14,438,581
app_train['CODE_GENDER'].value_counts()<concatenate>
y_pred_lgb = best_model.predict(X_test )
Tabular Playground Series - Jan 2021
14,438,581
apps = pd.concat([app_train, app_test]) print(apps.shape )<feature_engineering>
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred_lgb)) print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred_lgb)) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred_lgb)) )
Tabular Playground Series - Jan 2021
14,438,581
apps['APPS_EXT_SOURCE_MEAN'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1) apps['APPS_EXT_SOURCE_STD'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1 )<feature_engineering>
preds_ensemble_avg =(y_pred_xgb + y_pred_lgb)/2
Tabular Playground Series - Jan 2021
14,438,581
apps['APPS_EXT_SOURCE_STD'] = apps['APPS_EXT_SOURCE_STD'].fillna(apps['APPS_EXT_SOURCE_STD'].mean() )<feature_engineering>
print("Averaging Ensemble predictions KPI here:") print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, preds_ensemble_avg)) print('Mean Squared Error:', metrics.mean_squared_error(y_test, preds_ensemble_avg)) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, preds_ensemble_avg)) )
Tabular Playground Series - Jan 2021
14,438,581
apps['APPS_ANNUITY_CREDIT_RATIO'] = apps['AMT_ANNUITY']/apps['AMT_CREDIT'] apps['APPS_GOODS_CREDIT_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_CREDIT'] apps['APPS_CREDIT_GOODS_DIFF'] = apps['AMT_CREDIT'] - apps['AMT_GOODS_PRICE']<feature_engineering>
preds_ensemble_avg =(y_pred_xgb*0.1 + y_pred_lgb *0.9 )
Tabular Playground Series - Jan 2021
14,438,581
apps['APPS_ANNUITY_INCOME_RATIO'] = apps['AMT_ANNUITY']/apps['AMT_INCOME_TOTAL'] apps['APPS_CREDIT_INCOME_RATIO'] = apps['AMT_CREDIT']/apps['AMT_INCOME_TOTAL'] apps['APPS_GOODS_INCOME_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_INCOME_TOTAL'] apps['APPS_CNT_FAM_INCOME_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['CNT_FAM_MEMBERS'] <feature_engineering>
print("Weighted average Ensemble predictions KPI here:") print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, preds_ensemble_avg)) print('Mean Squared Error:', metrics.mean_squared_error(y_test, preds_ensemble_avg)) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, preds_ensemble_avg)) )
Tabular Playground Series - Jan 2021
14,438,581
apps['APPS_EMPLOYED_BIRTH_RATIO'] = apps['DAYS_EMPLOYED']/apps['DAYS_BIRTH'] apps['APPS_INCOME_EMPLOYED_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_EMPLOYED'] apps['APPS_INCOME_BIRTH_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_BIRTH'] apps['APPS_CAR_BIRTH_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_BIRTH'] apps['APPS_CAR_EMPLOYED_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_EMPLOYED']<feature_engineering>
preds_xgb = XGB.predict(df_test.iloc[:,1:].values )
Tabular Playground Series - Jan 2021
14,438,581
object_columns = apps.dtypes[apps.dtypes=='object'].index.tolist() for column in object_columns: apps[column] = pd.factorize(apps[column])[0]<split>
preds_lgb = best_model.predict(df_test.iloc[:,1:].values )
Tabular Playground Series - Jan 2021
14,438,581
# Build the model matrix from the feature-engineered train rows and hold out a
# 30% validation set (random_state pinned for reproducibility).
ftr_app = apps_train.drop(['SK_ID_CURR', 'TARGET'], axis=1)
# Fix: take the label from apps_train — the same frame the features come from —
# instead of app_train['TARGET'], which only stayed aligned by accident of
# pd.concat preserving row order.
target_app = apps_train['TARGET']
train_x, valid_x, train_y, valid_y = train_test_split(
    ftr_app, target_app, test_size=0.3, random_state=2020)
train_x.shape, valid_x.shape
sub=pd.read_csv(".. /input/tabular-playground-series-jan-2021/sample_submission.csv" )
Tabular Playground Series - Jan 2021
14,438,581
<train_model><EOS>
sub.target =(preds_xgb*0.1+ preds_lgb*0.9) sub.to_csv("submission.csv", index=False )
Tabular Playground Series - Jan 2021
14,515,126
<SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<predict_on_test>
import numpy as np import pandas as pd import matplotlib.pyplot as plt import gc import xgboost as xgb from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error from hyperopt import hp, fmin, tpe, Trials from hyperopt.pyll.base import scope from tqdm import tqdm
Tabular Playground Series - Jan 2021
14,515,126
preds = clf.predict_proba(apps_test.drop(['SK_ID_CURR', 'TARGET'], axis=1)) [:, 1 ]<save_to_csv>
path = '.. /input/tabular-playground-series-jan-2021/' train = pd.read_csv(path+'train.csv') train.set_index('id',drop=True,inplace=True) train.drop(284103,inplace=True )
Tabular Playground Series - Jan 2021
14,515,126
app_test['TARGET'] = preds app_test[['SK_ID_CURR', 'TARGET']].to_csv('apps_baseline_02.csv', index=False )<feature_engineering>
features = [col for col in train.columns if 'cont' in col] label = 'target'
Tabular Playground Series - Jan 2021
14,515,126
def get_apps_processed(apps):
    """Append engineered application features and return the (mutated) frame.

    Adds EXT_SOURCE mean/std aggregates (std NaNs back-filled with the column
    mean) plus a set of pairwise ratio features; column insertion order matches
    the downstream feature names exactly.
    """
    ext_cols = ['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']
    apps['APPS_EXT_SOURCE_MEAN'] = apps[ext_cols].mean(axis=1)
    apps['APPS_EXT_SOURCE_STD'] = apps[ext_cols].std(axis=1)
    # Rows with fewer than two EXT_SOURCE values have NaN std; impute with the mean std.
    apps['APPS_EXT_SOURCE_STD'] = apps['APPS_EXT_SOURCE_STD'].fillna(
        apps['APPS_EXT_SOURCE_STD'].mean())
    # new column -> (numerator, denominator); dict order preserves original column order.
    ratio_specs = {
        'APPS_ANNUITY_CREDIT_RATIO': ('AMT_ANNUITY', 'AMT_CREDIT'),
        'APPS_GOODS_CREDIT_RATIO': ('AMT_GOODS_PRICE', 'AMT_CREDIT'),
        'APPS_ANNUITY_INCOME_RATIO': ('AMT_ANNUITY', 'AMT_INCOME_TOTAL'),
        'APPS_CREDIT_INCOME_RATIO': ('AMT_CREDIT', 'AMT_INCOME_TOTAL'),
        'APPS_GOODS_INCOME_RATIO': ('AMT_GOODS_PRICE', 'AMT_INCOME_TOTAL'),
        'APPS_CNT_FAM_INCOME_RATIO': ('AMT_INCOME_TOTAL', 'CNT_FAM_MEMBERS'),
        'APPS_EMPLOYED_BIRTH_RATIO': ('DAYS_EMPLOYED', 'DAYS_BIRTH'),
        'APPS_INCOME_EMPLOYED_RATIO': ('AMT_INCOME_TOTAL', 'DAYS_EMPLOYED'),
        'APPS_INCOME_BIRTH_RATIO': ('AMT_INCOME_TOTAL', 'DAYS_BIRTH'),
        'APPS_CAR_BIRTH_RATIO': ('OWN_CAR_AGE', 'DAYS_BIRTH'),
        'APPS_CAR_EMPLOYED_RATIO': ('OWN_CAR_AGE', 'DAYS_EMPLOYED'),
    }
    for new_col, (num, den) in ratio_specs.items():
        apps[new_col] = apps[num] / apps[den]
    return apps
fig.clear() plt.close(fig )
Tabular Playground Series - Jan 2021
14,515,126
prev = pd.read_csv('.. /input/home-credit-default-risk/previous_application.csv') print(prev.shape, apps.shape )<groupby>
X_train,X_valid, y_train,y_valid = train_test_split(train[features],train[label],test_size=0.2) d_tr = xgb.DMatrix(X_train, y_train) d_val = xgb.DMatrix(X_valid,y_valid )
Tabular Playground Series - Jan 2021
14,515,126
prev.groupby('SK_ID_CURR')['SK_ID_CURR'].count().mean()<merge>
params_base = {'objective': 'reg:squarederror', 'tree_method': 'gpu_hist', 'random_state': 0} base_model = xgb.train(params = params_base, dtrain = d_tr, num_boost_round = 1000, evals = [(d_val,'eval')], early_stopping_rounds=10, verbose_eval = 20) y_pred_base = base_model.predict(d_val) base_score = mean_squared_error(y_valid, y_pred_base,squared=False) print(base_score )
Tabular Playground Series - Jan 2021
14,515,126
app_prev_target = prev.merge(app_train[['SK_ID_CURR', 'TARGET']], on='SK_ID_CURR', how='left') app_prev_target.shape<feature_engineering>
# Hyperopt objective: train an XGBoost regressor with the sampled hyper-parameters
# and return the validation RMSE (lower is better, so fmin minimises it directly).
# NOTE(review): relies on notebook globals d_tr / d_val (xgb.DMatrix splits) and
# y_valid, and on a GPU being present ('tree_method': 'gpu_hist') — confirm before
# reuse. params['n_round'] is consumed as num_boost_round (positional arg to
# xgb.train), not as part of the booster param dict; early stopping caps it at
# 10 stagnant eval rounds.
def score(params): ps = {'learning_rate': params['learning_rate'], 'max_depth': params['max_depth'], 'gamma': params['gamma'], 'min_child_weight': params['min_child_weight'], 'subsample': params['subsample'], 'colsample_bytree': params['colsample_bytree'], 'verbosity': 1, 'objective': 'reg:squarederror', 'eval_metric': 'rmse', 'tree_method': 'gpu_hist', 'random_state': 27, } model = xgb.train(ps,d_tr, params['n_round'], [(d_val, 'eval')], early_stopping_rounds=10, verbose_eval = False) y_pred = model.predict(d_val) score = mean_squared_error(y_valid, y_pred,squared=False) return score
Tabular Playground Series - Jan 2021
14,515,126
def get_prev_processed(prev):
    """Add engineered features to the previous_application frame (mutates and returns it).

    Adds credit/goods difference and ratio columns, converts the Home Credit
    "no date" sentinel 365243 in the DAYS_* columns to NaN, and derives a rough
    per-installment interest rate from annuity * payment count vs. credit.
    """
    prev['PREV_CREDIT_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_CREDIT']
    prev['PREV_GOODS_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_GOODS_PRICE']
    prev['PREV_CREDIT_APPL_RATIO'] = prev['AMT_CREDIT'] / prev['AMT_APPLICATION']
    prev['PREV_ANNUITY_APPL_RATIO'] = prev['AMT_ANNUITY'] / prev['AMT_APPLICATION']
    prev['PREV_GOODS_APPL_RATIO'] = prev['AMT_GOODS_PRICE'] / prev['AMT_APPLICATION']
    # Fix: assign the replaced column back instead of Series.replace(inplace=True)
    # on a chained selection — pandas 2.x deprecates that pattern (removed in 3.0)
    # and it can silently operate on a copy.
    day_cols = ['DAYS_FIRST_DRAWING', 'DAYS_FIRST_DUE', 'DAYS_LAST_DUE_1ST_VERSION',
                'DAYS_LAST_DUE', 'DAYS_TERMINATION']
    for col in day_cols:
        prev[col] = prev[col].replace(365243, np.nan)
    prev['PREV_DAYS_LAST_DUE_DIFF'] = prev['DAYS_LAST_DUE_1ST_VERSION'] - prev['DAYS_LAST_DUE']
    # Total repaid over the schedule vs. principal gives the implied per-installment rate.
    all_pay = prev['AMT_ANNUITY'] * prev['CNT_PAYMENT']
    prev['PREV_INTERESTS_RATE'] = (all_pay / prev['AMT_CREDIT'] - 1) / prev['CNT_PAYMENT']
    return prev
param_space = {'learning_rate': hp.uniform('learning_rate', 0.01, 0.3), 'n_round': scope.int(hp.quniform('n_round', 200, 3000, 100)) , 'max_depth': scope.int(hp.quniform('max_depth', 5, 16, 1)) , 'gamma': hp.uniform('gamma', 0, 10), 'min_child_weight': hp.uniform('min_child_weight', 0, 10), 'subsample': hp.uniform('subsample', 0.1, 1), 'colsample_bytree': hp.uniform('colsample_bytree', 0.1, 1) }
Tabular Playground Series - Jan 2021
14,515,126
def get_prev_amt_agg(prev): agg_dict = { 'SK_ID_CURR':['count'], 'AMT_CREDIT':['mean', 'max', 'sum'], 'AMT_ANNUITY':['mean', 'max', 'sum'], 'AMT_APPLICATION':['mean', 'max', 'sum'], 'AMT_DOWN_PAYMENT':['mean', 'max', 'sum'], 'AMT_GOODS_PRICE':['mean', 'max', 'sum'], 'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'], 'DAYS_DECISION': ['min', 'max', 'mean'], 'CNT_PAYMENT': ['mean', 'sum'], 'PREV_CREDIT_DIFF':['mean', 'max', 'sum'], 'PREV_CREDIT_APPL_RATIO':['mean', 'max'], 'PREV_GOODS_DIFF':['mean', 'max', 'sum'], 'PREV_GOODS_APPL_RATIO':['mean', 'max'], 'PREV_DAYS_LAST_DUE_DIFF':['mean', 'max', 'sum'], 'PREV_INTERESTS_RATE':['mean', 'max'] } prev_group = prev.groupby('SK_ID_CURR') prev_amt_agg = prev_group.agg(agg_dict) prev_amt_agg.columns = ["PREV_"+ "_".join(x ).upper() for x in prev_amt_agg.columns.ravel() ] prev_amt_agg = prev_amt_agg.reset_index() return prev_amt_agg<groupby>
%time trials = Trials() hopt = fmin(fn = score, space = param_space, algo = tpe.suggest, max_evals = 1000, trials = trials, )
Tabular Playground Series - Jan 2021
14,515,126
def get_prev_refused_appr_agg(prev):
    """Count Approved/Refused previous applications per SK_ID_CURR.

    Returns a frame with SK_ID_CURR, PREV_APPROVED_COUNT and
    PREV_REFUSED_COUNT; a status absent for the whole slice yields 0 counts.
    """
    subset = prev[prev['NAME_CONTRACT_STATUS'].isin(['Approved', 'Refused'])]
    counts = (subset.groupby(['SK_ID_CURR', 'NAME_CONTRACT_STATUS'])['SK_ID_CURR']
              .count()
              .unstack())
    # Fix: select columns by status name before renaming. The original renamed
    # positionally, which raises ValueError whenever only one of the two
    # statuses occurs in the data (unstack then produces a single column).
    counts = counts.reindex(columns=['Approved', 'Refused'])
    counts.columns = ['PREV_APPROVED_COUNT', 'PREV_REFUSED_COUNT']
    counts = counts.fillna(0)
    return counts.reset_index()
params_best = hopt params_best['max_depth'] = int(hopt['max_depth']) n_rounds_best = int(hopt['n_round']) del params_best['n_round'] print(params_best) print(n_rounds_best )
Tabular Playground Series - Jan 2021
14,515,126
def get_prev_agg(prev):
    """Build the full previous-application feature set keyed by SK_ID_CURR.

    Engineers row-level features, aggregates amounts and approval/refusal
    counts, converts the counts to ratios of total previous applications, and
    drops the raw count columns.
    """
    processed = get_prev_processed(prev)
    merged = get_prev_amt_agg(processed).merge(
        get_prev_refused_appr_agg(processed), on='SK_ID_CURR', how='left')
    total = merged['PREV_SK_ID_CURR_COUNT']
    merged['PREV_REFUSED_RATIO'] = merged['PREV_REFUSED_COUNT'] / total
    merged['PREV_APPROVED_RATIO'] = merged['PREV_APPROVED_COUNT'] / total
    return merged.drop(['PREV_REFUSED_COUNT', 'PREV_APPROVED_COUNT'], axis=1)
%time params_best['tree_method'] = 'gpu_hist' d = xgb.DMatrix(train[features], train[label]) xgb_final = xgb.train(params_best,d,n_rounds_best )
Tabular Playground Series - Jan 2021
14,515,126
def get_apps_all_with_prev_agg(apps, prev):
    """Left-join previous-application aggregates onto the processed application frame.

    Prints the shapes before/after the merge for a quick sanity check.
    """
    merged = get_apps_processed(apps)
    prev_features = get_prev_agg(prev)
    print('prev_agg shape:', prev_features.shape)
    print('apps_all before merge shape:', merged.shape)
    merged = merged.merge(prev_features, on='SK_ID_CURR', how='left')
    print('apps_all after merge with prev_agg shape:', merged.shape)
    return merged
y_pred_final = xgb_final.predict(d) score_final = np.sqrt(mean_squared_error(train[label], y_pred_final)) print(score_final )
Tabular Playground Series - Jan 2021
14,515,126
def get_apps_all_encoded(apps_all):
    """Label-encode every object-dtype column in place via pd.factorize.

    NaN values become -1 (pandas factorize convention); returns the frame.
    """
    obj_cols = [col for col, dtype in apps_all.dtypes.items() if dtype == 'object']
    for col in obj_cols:
        codes, _ = pd.factorize(apps_all[col])
        apps_all[col] = codes
    return apps_all
test = pd.read_csv(path + 'test.csv') test.set_index('id',drop=True,inplace=True) d_tst = xgb.DMatrix(test[features])
Tabular Playground Series - Jan 2021
14,515,126
def get_apps_all_train_test(apps_all):
    """Split the combined frame back into train and test parts.

    Rows with a non-null TARGET form the train frame (TARGET kept); rows with
    TARGET missing form the test frame with the TARGET column dropped.
    """
    missing_target = apps_all['TARGET'].isnull()
    train_df = apps_all[~missing_target]
    test_df = apps_all[missing_target].drop('TARGET', axis=1)
    return train_df, test_df
models = [] for seed in range(0,10): params_best['seed'] = seed xgb_final = xgb.train(params_best,d,num_boost_round = n_rounds_best) models.append(xgb_final) xgb_pred = xgb_final.predict(d_tst )
Tabular Playground Series - Jan 2021
14,515,126
def train_apps_all(apps_all_train):
    """Fit an LGBM binary classifier on the engineered application features.

    Holds out 30% of rows for AUC-based early stopping (patience 100 rounds)
    and returns the fitted classifier.
    """
    features = apps_all_train.drop(['SK_ID_CURR', 'TARGET'], axis=1)
    labels = apps_all_train['TARGET']
    train_x, valid_x, train_y, valid_y = train_test_split(
        features, labels, test_size=0.3, random_state=2020)
    print('train shape:', train_x.shape, 'valid shape:', valid_x.shape)
    clf = LGBMClassifier(
        n_jobs=-1,
        n_estimators=1000,
        learning_rate=0.02,
        num_leaves=32,
        subsample=0.8,
        max_depth=12,
        silent=-1,
        verbose=-1,
    )
    clf.fit(
        train_x, train_y,
        eval_set=[(train_x, train_y), (valid_x, valid_y)],
        eval_metric='auc',
        verbose=100,
        early_stopping_rounds=100,
    )
    return clf
ids = test.index output = pd.DataFrame({'id': ids, 'target': xgb_pred}) output.to_csv('submission.csv', index=False )
Tabular Playground Series - Jan 2021
14,194,052
def get_dataset():
    """Load the Home Credit tables used by the pipeline.

    Returns (apps, prev): apps stacks application_train (with TARGET) on top of
    application_test (TARGET absent -> NaN), prev is previous_application.
    """
    # Fix: the path literal contained a stray space ('.. /input/...'), which is
    # not a valid relative path.
    base_dir = '../input/home-credit-default-risk/'
    app_train = pd.read_csv(base_dir + 'application_train.csv')
    app_test = pd.read_csv(base_dir + 'application_test.csv')
    apps = pd.concat([app_train, app_test])
    prev = pd.read_csv(base_dir + 'previous_application.csv')
    return apps, prev
input_path = Path('/kaggle/input/tabular-playground-series-jan-2021/' )
Tabular Playground Series - Jan 2021
14,194,052
# End-to-end pipeline: load raw tables, engineer and merge previous-application
# aggregates, label-encode categoricals, split back into train/test, and fit
# the LGBM classifier on the train portion.
apps, prev = get_dataset() apps_all = get_apps_all_with_prev_agg(apps, prev) apps_all = get_apps_all_encoded(apps_all) apps_all_train, apps_all_test = get_apps_all_train_test(apps_all) clf = train_apps_all(apps_all_train )<save_to_csv>
train = pd.read_csv(input_path / 'train.csv', index_col='id') display(train.head() )
Tabular Playground Series - Jan 2021
14,194,052
preds = clf.predict_proba(apps_all_test.drop(['SK_ID_CURR'], axis=1)) [:, 1 ] apps_all_test['TARGET'] = preds apps_all_test[['SK_ID_CURR', 'TARGET']].to_csv('prev_baseline_03.csv', index=False )<load_from_csv>
test = pd.read_csv(input_path / 'test.csv', index_col='id') display(test.head() )
Tabular Playground Series - Jan 2021
14,194,052
bureau = pd.read_csv('.. /input/home-credit-default-risk/bureau.csv') bureau_bal = pd.read_csv('.. /input/home-credit-default-risk/bureau_balance.csv' )<merge>
submission = pd.read_csv(input_path / 'sample_submission.csv', index_col='id') display(submission.head() )
Tabular Playground Series - Jan 2021
14,194,052
bureau_app = bureau.merge(app_train[['SK_ID_CURR', 'TARGET']], left_on='SK_ID_CURR', right_on='SK_ID_CURR', how='inner') bureau_app.shape<define_variables>
features = [f'cont{x}'for x in range(1,15)] data= train[features] train.isnull().sum()
Tabular Playground Series - Jan 2021
14,194,052
num_columns = bureau_app.dtypes[bureau_app.dtypes != 'object'].index.tolist() num_columns = [column for column in num_columns if column not in['SK_ID_BUREAU', 'SK_ID_CURR', 'TARGET']] num_columns<data_type_conversions>
train[train['target'] == 0]
Tabular Playground Series - Jan 2021
14,194,052
object_columns = bureau.dtypes[bureau.dtypes=='object'].index.tolist() object_columns<groupby>
def replace_outliers(data):
    """Replace IQR outliers in every column with that column's median (in place).

    A value is an outlier when it lies outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
    Assumes all columns are numeric; returns `data`.
    """
    for col in data.columns:
        q1, q3 = data[col].quantile(0.25), data[col].quantile(0.75)
        fence = 1.5 * (q3 - q1)
        is_outlier = (data[col] < q1 - fence) | (data[col] > q3 + fence)
        # RHS median is computed on the not-yet-mutated column.
        data.loc[is_outlier, col] = data[col].median()
    return data
Tabular Playground Series - Jan 2021
14,194,052
show_category_by_target(bureau_app, object_columns )<feature_engineering>
train = replace_outliers(train )
Tabular Playground Series - Jan 2021