| kernel_id (int64) | prompt (string) | completion (string) | comp_name (string) |
|---|---|---|---|
1,072,962 | %matplotlib inline
plt.rcParams['figure.figsize'] = [9, 12]
warnings.simplefilter('ignore')<load_from_csv> | data = data.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_prev.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_pos.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_pos.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_cc_bal.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_cc_bal.reset_index(), how='left', on='SK_ID_CURR')
data = data.merge(right=avg_inst.reset_index(), how='left', on='SK_ID_CURR')
test = test.merge(right=avg_inst.reset_index(), how='left', on='SK_ID_CURR')
del avg_buro, avg_prev
gc.collect() | Home Credit Default Risk |
1,072,962 | train = pd.read_csv("/kaggle/input/whoisafriend/train.csv")
test = pd.read_csv("/kaggle/input/whoisafriend/test.csv")
sub = pd.read_csv("/kaggle/input/whoisafriend/sample_submission.csv")
train.shape, test.shape, sub.shape<groupby> | gc.enable()
folds = KFold(n_splits=6, shuffle=True, random_state=546789)
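# buffers for out-of-fold train predictions and fold-averaged test predictions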
oof_preds = np.zeros(data.shape[0])
sub_preds = np.zeros(test.shape[0])
feature_importance_df = pd.DataFrame()
feats = [f for f in data.columns if f not in ['SK_ID_CURR']] | Home Credit Default Risk |
1,072,962 | agg_train = train.groupby(['Person A', 'Person B'])['Years of Knowing'].count().reset_index()
agg_train.rename({
    "Years of Knowing": "Interaction Count"
}, axis=1, inplace=True)
agg_test = test.groupby(['Person A', 'Person B'])['Years of Knowing'].count().reset_index()
agg_test.rename({
    "Years of Knowing": "Interaction Count"
}, axis=1, inplace=True)<merge> | for n_fold, (trn_idx, val_idx) in enumerate(folds.split(data)):
    trn_x, trn_y = data[feats].iloc[trn_idx], y.iloc[trn_idx]
    val_x, val_y = data[feats].iloc[val_idx], y.iloc[val_idx]
    clf = LGBMClassifier(
        n_estimators=10000,
        learning_rate=0.03,
        num_leaves=22,
        colsample_bytree=0.8,
        subsample=0.8,
        max_depth=6,
        reg_alpha=0.1,
        reg_lambda=0.1,
        min_split_gain=0.01,
        min_child_weight=100,
        silent=-1,
        verbose=-1,
    )
    clf.fit(trn_x, trn_y,
            eval_set=[(trn_x, trn_y), (val_x, val_y)],
            eval_metric='auc', verbose=100, early_stopping_rounds=300
            )
    oof_preds[val_idx] = clf.predict_proba(val_x, num_iteration=clf.best_iteration_)[:, 1]
    sub_preds += clf.predict_proba(test[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
    fold_importance_df = pd.DataFrame()
    fold_importance_df["feature"] = feats
    fold_importance_df["importance"] = clf.feature_importances_
    fold_importance_df["fold"] = n_fold + 1
    feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
    print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(val_y, oof_preds[val_idx])))
    del clf, trn_x, trn_y, val_x, val_y
    gc.collect() | Home Credit Default Risk |
1,072,962 | <feature_engineering><EOS> | print('Full AUC score %.6f' % roc_auc_score(y, oof_preds))
test['TARGET'] = sub_preds
test[['SK_ID_CURR', 'TARGET']].to_csv('first_submission.csv', index=False) | Home Credit Default Risk |
1,056,158 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<save_to_csv> | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, f1_score
import gc
| Home Credit Default Risk |
test[['ID', 'Friends']].to_csv("1.0_sub.csv", index=False)<load_from_csv> | application_test=pd.read_csv('../input/application_test.csv')
application_train=pd.read_csv('../input/application_train.csv')
bureau=pd.read_csv('../input/bureau.csv')
bureau_balance=pd.read_csv('../input/bureau_balance.csv')
credit_card_balance=pd.read_csv('../input/credit_card_balance.csv')
installments_payments=pd.read_csv('../input/installments_payments.csv')
POS_CASH_balance=pd.read_csv('../input/POS_CASH_balance.csv')
previous_application=pd.read_csv('../input/previous_application.csv') | Home Credit Default Risk |
1,056,158 | !sed 's/\+AF8-//g' /kaggle/input/chh-ola/train.csv > train.csv
!sed 's/_//g' /kaggle/input/chh-ola/test.csv > test.csv<data_type_conversions> | def check_missing_data(df):
    total = df.isnull().sum().sort_values(ascending=False)
    percent = ((df.isnull().sum() / df.isnull().count()) * 100).sort_values(ascending=False)
    return pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) | Home Credit Default Risk |
1,056,158 | class Ut:
    @staticmethod
    def to_timestamp(dt):
        return dt_parse(dt, dayfirst=False).timestamp()
    @staticmethod
    def flag_to_num(vl):
        if vl == 'N':
            return 0
        else:
            return 1
    @staticmethod
    def to_float(vl):
        try:
            if isinstance(vl, str):
                idx = vl.find('-')
                if idx != -1:
                    txt = vl.split('-')
                    return float(txt[1])
            return float(vl)
        except:
            print(vl)
            return float(0)
    @staticmethod
    def rmse(predictions, targets):
        return np.sqrt(mean_squared_error(np.exp(predictions), np.exp(targets)))<load_from_csv> | def categorical_features(df):
    cat_features = df.columns[df.dtypes == 'object']
    return list(cat_features) | Home Credit Default Risk |
1,056,158 | train_set = pd.read_csv('train.csv', low_memory=False, dtype=str)
train_set.dropna(inplace=True)
train_set.reset_index(drop=True, inplace=True)
test_set = pd.read_csv('test.csv', low_memory=False, dtype=str)
test_set['totalamount'] = 0
train_set['PROPOSITO'] = 1
test_set['PROPOSITO'] = 0
all_set = pd.concat([train_set, test_set], ignore_index=True)
del(train_set)
del(test_set)
all_set['mtatax'] = 0.5
all_set['storedflag'] = all_set['storedflag'].apply(Ut.flag_to_num)
all_set['pickuptime'] = all_set['pickuptime'].apply(Ut.to_timestamp)
all_set['droptime'] = all_set['droptime'].apply(Ut.to_timestamp)
all_set['drivertip'] = all_set['drivertip'].apply(Ut.to_float)
all_set['mtatax'] = all_set['mtatax'].apply(Ut.to_float)
all_set['tollamount'] = all_set['tollamount'].apply(Ut.to_float)
all_set['extracharges'] = all_set['extracharges'].apply(Ut.to_float)
all_set['improvementcharge'] = all_set['improvementcharge'].apply(Ut.to_float)
all_set['totalamount'] = all_set['totalamount'].apply(Ut.to_float)<feature_engineering> | def onehot_encoding(df, cat_features_name):
    df = pd.get_dummies(df, columns=cat_features_name)
    return df | Home Credit Default Risk |
1,056,158 | all_set['totaltime'] = all_set['droptime'] - all_set['pickuptime']
all_set['taxes'] = all_set['drivertip'] + all_set['mtatax'] + all_set['tollamount'] + all_set['extracharges'] + all_set['improvementcharge']<data_type_conversions> | categorical_features(bureau) | Home Credit Default Risk |
1,056,158 | features_cat = ['vendorid', 'paymentmethod', 'ratecode', 'storedflag']
features_num = ['drivertip', 'pickuploc', 'droploc', 'mtatax', 'distance', 'pickuptime', 'droptime', 'numpassengers',
'tollamount', 'extracharges', 'improvementcharge', 'totalamount', 'totaltime', 'taxes']
target = 'totalamount'
for col in features_num:
    all_set[col] = all_set[col].astype(float)
for col in features_cat:
    all_set[col] = all_set[col].astype(float)
    all_set[col] = all_set[col].astype(str)
all_set['PROPOSITO'] = all_set['PROPOSITO'].astype(int)
all_set['ID'] = all_set['ID'].astype(int)<drop_column> | bureau.CREDIT_ACTIVE.value_counts() | Home Credit Default Risk |
1,056,158 | all_dum = pd.get_dummies(all_set)
train_df = all_dum[all_dum['PROPOSITO'] == 1].copy()
test_df = all_dum[all_dum['PROPOSITO'] == 0].copy()
del(all_dum)
train_df.drop(columns=['PROPOSITO'], inplace=True)
test_df.drop(columns=['PROPOSITO'], inplace=True)
train_df = train_df[train_df['pickuploc'] != train_df['droploc']]
train_df = train_df[train_df['totalamount'] > 0]
test_df.drop(columns=[target], inplace=True)<prepare_x_and_y> | bureau.CREDIT_CURRENCY.value_counts() | Home Credit Default Risk |
1,056,158 | features_to_keep = [
'taxes',
'pickuploc',
'ratecode_2.0',
'ratecode_1.0',
'ratecode_5.0',
'storedflag_0.0',
'ratecode_4.0',
'totaltime',
'ratecode_3.0',
'droploc',
'numpassengers',
'distance',
'storedflag_1.0',
'vendorid_2.0',
'paymentmethod_1.0',
'vendorid_1.0',
'paymentmethod_2.0'
]
X = train_df[features_to_keep].copy()
y = train_df[target].copy()
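# Normalizer scales each row to unit norm; the target is log-transformed and mapped back with np.exp later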
normalizer = Normalizer()
norm_X = normalizer.fit_transform(X)
y = np.log(y)<train_on_grid> | bureau.CREDIT_TYPE.value_counts() | Home Credit Default Risk |
1,056,158 | if False:
    params = {
        'colsample_bytree': [0.9],
        'gamma': [0.3],
        'max_depth': [9],
        'min_child_weight': [2],
        'subsample': [0.9],
        'n_estimators': [50],
        'objective': ['reg:squarederror'],
        'n_jobs': [8],
    }
    eval_model = xgb.XGBRegressor(nthread=-1)
    grid = GridSearchCV(eval_model, params, cv=2)
    grid.fit(train_X, train_y)
    pred_y = grid.predict(test_X)
    print('RMSE Test = ', Ut.rmse(pred_y, test_y))
    print(grid)
    print(grid.best_params_)<train_on_grid> | bureau.AMT_CREDIT_SUM.fillna(value=bureau.AMT_CREDIT_SUM.median(), inplace=True) | Home Credit Default Risk |
1,056,158 | if False:
    params = {
        'min_child_weight': st.randint(2, 9),
        'gamma': st.uniform(0.1, 0.9),
        'subsample': st.uniform(0.1, 0.9),
        'colsample_bytree': st.uniform(0.1, 0.9),
        'max_depth': st.randint(3, 9),
        'n_estimators': [50],
        'objective': ['reg:squarederror'],
    }
    eval_model = xgb.XGBRegressor(nthread=-1)
    grid = RandomizedSearchCV(eval_model, params, cv=2, n_jobs=1, n_iter=10)
    grid.fit(train_X, train_y)
    pred_y = grid.predict(test_X)
    print('RMSE Test = ', Ut.rmse(pred_y, test_y))
    print(grid)<find_best_model_class> | bureau['DAYS_CREDIT_ENDDATE']=np.where(bureau.DAYS_CREDIT_ENDDATE.isnull(), bureau.DAYS_ENDDATE_FACT, bureau.DAYS_CREDIT_ENDDATE) | Home Credit Default Risk |
1,056,158 | train_X, test_X, train_y, test_y = train_test_split(norm_X, y, test_size=0.2, random_state=42)
params = {
'objective': 'reg:squarederror',
'n_estimators': 1000,
'subsample': 0.9,
'min_child_weight': 1,
'max_depth': 9,
'gamma': 0.3,
'colsample_bytree': 0.9,
'n_jobs': 8,
'verbose_eval':'False',
}
model = xgb.XGBRegressor(**params)
model.fit(train_X,train_y)
pred_y = model.predict(test_X)
print('RMSE Test = ', Ut.rmse(pred_y, test_y))<train_model> | bureau.DAYS_CREDIT_ENDDATE.fillna(value=0.0, inplace=True) | Home Credit Default Risk |
1,056,158 | real_X = normalizer.transform(test_df[features_to_keep].copy())
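# refit on all training data, then invert the log transform on the final predictions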
model = xgb.XGBRegressor(**params)
model.fit(norm_X, y)
predictions = np.exp(model.predict(real_X))<save_to_csv> | bureau.drop('DAYS_ENDDATE_FACT', axis=1, inplace=True) | Home Credit Default Risk |
1,056,158 | result = []
for idx in range(test_df.shape[0]):
    result.append([idx, predictions[idx]])
result = pd.DataFrame(result, columns=['ID', 'total_amount'])
result.to_csv('result.csv', index=False)<drop_column> | bureau.AMT_CREDIT_MAX_OVERDUE.fillna(0.0, inplace=True) | Home Credit Default Risk |
1,056,158 | !rm train.csv
!rm test.csv<import_modules> | bureau.AMT_CREDIT_SUM_LIMIT.fillna(0.0, inplace=True) | Home Credit Default Risk |
1,056,158 | import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import statsmodels.api as sm<load_from_csv> | bureau.AMT_CREDIT_SUM_DEBT.fillna(0.0, inplace=True) | Home Credit Default Risk |
1,056,158 | train_df = pd.read_csv('../input/train.csv', index_col=0)
test_df = pd.read_csv('../input/test.csv', index_col=0)
train_df.head()<count_missing_values> | bureau.drop('AMT_ANNUITY', axis=1, inplace=True) | Home Credit Default Risk |
1,056,158 | train_df.isna().sum()<drop_column> | bureau_onehot=onehot_encoding(bureau,categorical_features(bureau))
bureau_onehot.head() | Home Credit Default Risk |
1,056,158 | cleaned_train_df = train_df.drop(['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating'], axis=1)<data_type_conversions> | del bureau
gc.collect() | Home Credit Default Risk |
1,056,158 | cleaned_train_df.Year_of_Release.fillna(cleaned_train_df.Year_of_Release.median(), inplace=True)<count_values> | month_count=bureau_balance.groupby('SK_ID_BUREAU').size() | Home Credit Default Risk |
1,056,158 | cleaned_train_df.Genre.value_counts()<count_values> | bureau_balance.STATUS.value_counts() | Home Credit Default Risk |
cleaned_train_df.Publisher.value_counts()<drop_column> | bureau_balance_unstack=bureau_balance.groupby('SK_ID_BUREAU')['STATUS'].value_counts(normalize=False).unstack('STATUS')
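# one row per SK_ID_BUREAU with a count for each monthly STATUS value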
bureau_balance_unstack.columns=['status_DPD0','status_DPD1','status_DPD2','status_DPD3','status_DPD4','status_DPD5','status_closed','status_X']
bureau_balance_unstack['month_count']=month_count
bureau_balance_unstack.fillna(value=0,inplace=True)
bureau_balance_unstack.head() | Home Credit Default Risk |
1,056,158 | cleaned_train_df.dropna(subset=['Genre', 'Publisher'], inplace=True)<count_missing_values> | del bureau_balance
gc.collect() | Home Credit Default Risk |
1,056,158 | cleaned_train_df.isna().sum()<data_type_conversions> | bureau_merge=bureau_onehot.merge(bureau_balance_unstack, how='left', on='SK_ID_BUREAU') | Home Credit Default Risk |
1,056,158 | cleaned_train_df.Year_of_Release = cleaned_train_df.Year_of_Release.astype('int64')<feature_engineering> | cnt_id_bureau=bureau_merge[['SK_ID_CURR','SK_ID_BUREAU']].groupby('SK_ID_CURR').size() | Home Credit Default Risk |
1,056,158 | cleaned_train_df['JP_Sales_sqrt'] = np.sqrt(cleaned_train_df.JP_Sales)
cleaned_train_df['NA_Sales_sqrt'] = np.sqrt(cleaned_train_df.NA_Sales)<drop_column> | del bureau_merge,bureau_onehot,bureau_balance_unstack
gc.collect() | Home Credit Default Risk |
1,056,158 | cleaned_train_df.drop(['JP_Sales_sqrt', 'NA_Sales_sqrt'], axis=1, inplace=True)<drop_column> | categorical_features(previous_application) | Home Credit Default Risk |
1,056,158 | cleaned_train_df.JP_Sales.replace({0: 0.001}, inplace=True)
cleaned_train_df.NA_Sales.replace({0: 0.001}, inplace=True)<feature_engineering> | previous_application.drop(['RATE_INTEREST_PRIVILEGED','RATE_INTEREST_PRIMARY'], axis=1, inplace=True) | Home Credit Default Risk |
1,056,158 | cleaned_train_df.JP_Sales = np.log(cleaned_train_df.JP_Sales)
cleaned_train_df.NA_Sales = np.log(cleaned_train_df.NA_Sales)<drop_column> | previous_application.AMT_CREDIT.fillna(previous_application.AMT_CREDIT.median(), inplace=True) | Home Credit Default Risk |
1,056,158 | cleaned_train_df.drop('Publisher', axis=1, inplace=True)<count_values> | previous_application.CHANNEL_TYPE.value_counts() | Home Credit Default Risk |
1,056,158 | platform_counts = cleaned_train_df.Platform.value_counts()
platform_counts<feature_engineering> | previous_application.drop(['PRODUCT_COMBINATION','NAME_TYPE_SUITE'], axis=1, inplace=True) | Home Credit Default Risk |
1,056,158 | uncommon_platforms = cleaned_train_df.Platform.isin(platform_counts.index[platform_counts<200])
cleaned_train_df.loc[uncommon_platforms, 'Platform'] = 'Other'<count_unique_values> | previous_application.RATE_DOWN_PAYMENT.fillna(previous_application.RATE_DOWN_PAYMENT.median(), inplace=True) | Home Credit Default Risk |
1,056,158 | platform_cats = list(cleaned_train_df.Platform.unique())
print(cleaned_train_df.Platform.nunique())
cleaned_train_df.Platform.value_counts()<categorify> | previous_application.AMT_DOWN_PAYMENT.fillna(0.0, inplace=True) | Home Credit Default Risk |
1,056,158 | cleaned_train_df = pd.get_dummies(cleaned_train_df)<prepare_x_and_y> | previous_application.AMT_GOODS_PRICE.fillna(previous_application.AMT_GOODS_PRICE.mean(), inplace=True) | Home Credit Default Risk |
1,056,158 | X = cleaned_train_df.drop('NA_Sales', axis=1)
y = cleaned_train_df.NA_Sales<normalization> | previous_application.AMT_ANNUITY.fillna(previous_application.AMT_ANNUITY.mean(), inplace=True) | Home Credit Default Risk |
1,056,158 |
<split> | previous_application.CNT_PAYMENT.fillna(previous_application.CNT_PAYMENT.median(), inplace=True) | Home Credit Default Risk |
1,056,158 | X_train, X_test, y_train, y_test = train_test_split(X, y)<train_on_grid> | previous_application_onehot=onehot_encoding(previous_application,categorical_features(previous_application)) | Home Credit Default Risk |
1,056,158 | def stepwise_selection(X, y,
                       initial_list=[],
                       threshold_in=0.01,
                       threshold_out=0.05,
                       verbose=True):
    included = list(initial_list)
    while True:
        changed = False
        excluded = list(set(X.columns) - set(included))
        new_pval = pd.Series(index=excluded, dtype=float)
        for new_column in excluded:
            model = sm.OLS(y, sm.add_constant(pd.DataFrame(X[included + [new_column]]))).fit()
            new_pval[new_column] = model.pvalues[new_column]
        best_pval = new_pval.min()
        if best_pval < threshold_in:
            best_feature = new_pval.idxmin()
            included.append(best_feature)
            changed = True
            if verbose:
                print('Add {:30} with p-value {:.6}'.format(best_feature, best_pval))
        model = sm.OLS(y, sm.add_constant(pd.DataFrame(X[included]))).fit()
        pvalues = model.pvalues.iloc[1:]
        worst_pval = pvalues.max()
        if worst_pval > threshold_out:
            changed = True
            worst_feature = pvalues.idxmax()
            included.remove(worst_feature)
            if verbose:
                print('Drop {:30} with p-value {:.6}'.format(worst_feature, worst_pval))
        if not changed:
            break
    return included<compute_test_metric> | cnt_id_prev1=previous_application_onehot[['SK_ID_CURR','SK_ID_PREV']].groupby('SK_ID_CURR').size() | Home Credit Default Risk |
1,056,158 | final_features = stepwise_selection(X_train, y_train)<train_model> | previous_application_mean=previous_application_onehot.groupby('SK_ID_CURR').mean().drop('SK_ID_PREV',axis=1)
previous_application_min=previous_application_onehot.groupby('SK_ID_CURR').min().drop('SK_ID_PREV',axis=1)
previous_application_max=previous_application_onehot.groupby('SK_ID_CURR').max().drop('SK_ID_PREV',axis=1)
previous_application_median=previous_application_onehot.groupby('SK_ID_CURR').median().drop('SK_ID_PREV',axis=1)
 | Home Credit Default Risk |
1,056,158 | predictors = sm.add_constant(X_train[final_features])
final_model = sm.OLS(y_train, predictors).fit()
final_model.summary()<compute_train_metric> | previous_application_merge=previous_application_mean.merge(previous_application_min,on='SK_ID_CURR').merge(previous_application_max,on='SK_ID_CURR').merge(previous_application_median,on='SK_ID_CURR')
previous_application_merge['cnt_id_prev1']=cnt_id_prev1
previous_application_merge.fillna(0,inplace=True)
previous_application_merge.head() | Home Credit Default Risk |
1,056,158 | linreg = LinearRegression()
linreg.fit(X_train[final_features], y_train)
y_hat_train = linreg.predict(X_train[final_features])
y_hat_test = linreg.predict(X_test[final_features])
train_mse = mean_squared_error(y_train, y_hat_train)
test_mse = mean_squared_error(y_test, y_hat_test)
print("Train MSE:", train_mse)
print("Test MSE:", test_mse )<drop_column> | del previous_application,previous_application_max,previous_application_mean,previous_application_min,previous_application_onehot
gc.collect() | Home Credit Default Risk |
1,056,158 | cleaned_test_df = test_df.drop(['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating', 'Publisher'], axis=1)
cleaned_test_df.JP_Sales.replace({0: 0.001}, inplace=True)
cleaned_test_df.JP_Sales = np.log(cleaned_test_df.JP_Sales)<drop_column> | POS_CASH_balance.NAME_CONTRACT_STATUS.value_counts() | Home Credit Default Risk |
1,056,158 | plts = platform_cats
plts.remove('Other')<feature_engineering> | check_missing_data(POS_CASH_balance) | Home Credit Default Risk |
1,056,158 | cleaned_test_df.loc[~cleaned_test_df['Platform'].isin(plts), 'Platform'] = 'Other'<categorify> | POS_CASH_balance.CNT_INSTALMENT_FUTURE.fillna(POS_CASH_balance.CNT_INSTALMENT_FUTURE.median(), inplace=True) | Home Credit Default Risk |
1,056,158 | cleaned_test_df = pd.get_dummies(cleaned_test_df)<drop_column> | POS_CASH_balance.drop('CNT_INSTALMENT', axis=1, inplace=True) | Home Credit Default Risk |
1,056,158 | cleaned_test_df = cleaned_test_df[final_features]<filter> | POS_CASH_balance_onehot=onehot_encoding(POS_CASH_balance,categorical_features(POS_CASH_balance))
POS_CASH_balance_onehot.head() | Home Credit Default Risk |
1,056,158 | test_data_notnull = cleaned_test_df[cleaned_test_df.Year_of_Release.notnull()]<filter> | cnt_id_prev2=POS_CASH_balance_onehot[['SK_ID_CURR','SK_ID_PREV']].groupby('SK_ID_CURR').size() | Home Credit Default Risk |
1,056,158 | test_data_null = cleaned_test_df[cleaned_test_df.Year_of_Release.isna()]<predict_on_test> | del POS_CASH_balance,POS_CASH_balance_onehot
gc.collect() | Home Credit Default Risk |
1,056,158 | test_data_notnull['Prediction'] = linreg.predict(test_data_notnull)<prepare_output> | categorical_features(credit_card_balance) | Home Credit Default Risk |
1,056,158 | predictions = test_data_notnull['Prediction']<create_dataframe> | credit_card_balance.NAME_CONTRACT_STATUS.value_counts() | Home Credit Default Risk |
1,056,158 | predictions = pd.DataFrame(predictions)<feature_engineering> | credit_card_balance_onehot=onehot_encoding(credit_card_balance,categorical_features(credit_card_balance)) | Home Credit Default Risk |
1,056,158 | test_data_null['Prediction'] = cleaned_train_df.NA_Sales.median()<prepare_output> | credit_card_balance_onehot.fillna(credit_card_balance_onehot.median(), inplace=True)
credit_card_balance.head() | Home Credit Default Risk |
1,056,158 | null_predictions = test_data_null['Prediction']<create_dataframe> | cnt_id_prev3=credit_card_balance_onehot[['SK_ID_CURR','SK_ID_PREV']].groupby('SK_ID_CURR').size() | Home Credit Default Risk |
1,056,158 | null_predictions = pd.DataFrame(null_predictions)<concatenate> | del credit_card_balance,credit_card_balance_onehot
gc.collect() | Home Credit Default Risk |
1,056,158 | predictions = predictions.append(null_predictions)<prepare_output> | check_missing_data(installments_payments) | Home Credit Default Risk |
1,056,158 | predictions.Prediction = np.exp(predictions.Prediction)<feature_engineering> | categorical_features(installments_payments) | Home Credit Default Risk |
1,056,158 | predictions['Id'] = predictions.index<prepare_output> | installments_payments.dropna(inplace=True) | Home Credit Default Risk |
1,056,158 | predictions = predictions[['Id', 'Prediction']]<save_to_csv> | cnt_id_prev4=installments_payments[['SK_ID_CURR','SK_ID_PREV']].groupby('SK_ID_CURR').size() | Home Credit Default Risk |
1,056,158 | predictions.to_csv('submission.csv', index=False)<import_modules> | installments_payments_min=installments_payments.groupby('SK_ID_CURR').min().drop('SK_ID_PREV',axis=1)
installments_payments_max=installments_payments.groupby('SK_ID_CURR').max().drop('SK_ID_PREV',axis=1)
installments_payments_median=installments_payments.groupby('SK_ID_CURR').median().drop('SK_ID_PREV',axis=1) | Home Credit Default Risk |
1,056,158 | import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.linear_model import LinearRegression as LR
from sklearn.neural_network import MLPRegressor as MLPR<load_from_csv> | installments_payments_merge=installments_payments_min.merge(installments_payments_max,on='SK_ID_CURR').merge(installments_payments_median,on='SK_ID_CURR') | Home Credit Default Risk |
1,056,158 | data_dir = '../input/ieee-pes-bdc-datathon-year-2020'
df = pd.read_csv(f'{data_dir}/train.csv')
test_df = pd.read_csv(f'{data_dir}/test.csv')<split> | installments_payments_merge['cnt_id_prev4']=cnt_id_prev4
installments_payments_merge.fillna(0,inplace=True)
installments_payments_merge.head() | Home Credit Default Risk |
1,056,158 | data_len = len(df)
pct = 1.0
train_len = int(1.0*data_len)
train_df = df[:train_len]
val_df = df[train_len:]<prepare_x_and_y> | del installments_payments,installments_payments_max,installments_payments_min
gc.collect() | Home Credit Default Risk |
1,056,158 | X_train = train_df.drop(['ID', 'global_horizontal_irradiance'], axis=1).values.reshape(-1, 6)
y_train = train_df['global_horizontal_irradiance'].values.reshape(len(train_df))<prepare_x_and_y> | target=application_train['TARGET'] | Home Credit Default Risk |
1,056,158 | X_val = val_df.drop(['ID', 'global_horizontal_irradiance'], axis=1).values.reshape(-1, 6)
y_val = val_df['global_horizontal_irradiance'].values.reshape(len(val_df))<prepare_x_and_y> | application_train.drop('TARGET',axis=1,inplace=True ) | Home Credit Default Risk |
1,056,158 | X_test = test_df.drop(['ID'], axis=1).values.reshape(-1, 6)
test_ID = test_df['ID'].values.reshape(len(test_df))<train_model> | application_train['TARGET']=target
application_train.head() | Home Credit Default Risk |
1,056,158 | reg = LR(normalize=True)
reg.fit(X_train, y_train)<predict_on_test> | application_test['TARGET']=-999 | Home Credit Default Risk |
1,056,158 | preds = reg.predict(X_test)<train_model> | df=pd.concat([application_train,application_test]) | Home Credit Default Risk |
1,056,158 | regr = MLPR(random_state=1, hidden_layer_sizes=(32, 8, 2), max_iter=5, validation_fraction=0.1, learning_rate_init=0.02, verbose=True)
regr.fit(X_train, y_train)<predict_on_test> | categorical_features(df) | Home Credit Default Risk |
1,056,158 | preds = regr.predict(X_test)
preds = [0 if p<0 else p for p in preds]<save_to_csv> | df_onehot=onehot_encoding(df,categorical_features(df))
df_onehot.shape | Home Credit Default Risk |
1,056,158 | zippedList = list(zip(test_ID, preds))
submission = pd.DataFrame(zippedList, columns=['ID', 'global_horizontal_irradiance'])
submission.to_csv('submission.csv', index=False)<set_options> | df_onehot.fillna(0, inplace=True) | Home Credit Default Risk |
1,056,158 | pd.options.display.max_columns = 999
warnings.simplefilter(action='ignore')
<load_from_csv> | del application_test,application_train,df
gc.collect() | Home Credit Default Risk |
1,056,158 | test = pd.read_csv("../input/seleksidukungaib/test.csv")
train = pd.read_csv("../input/seleksidukungaib/train.csv")
sample_submission = pd.read_csv("../input/seleksidukungaib/sample_submission.csv")<define_variables> | df_total=df_onehot.merge(right=bureau_final_median,on='SK_ID_CURR',how='left').merge(right=previous_application_median,on='SK_ID_CURR',how='left').merge(right=POS_CASH_balance_median,on='SK_ID_CURR',how='left').merge(right=credit_card_balance_median,on='SK_ID_CURR',how='left').merge(right=installments_payments_merge,on='SK_ID_CURR',how='left')
df_total.shape | Home Credit Default Risk |
1,056,158 | dropped_column = ['idx', 'userId', 'num_transfer_trx', 'max_transfer_trx',
'min_transfer_trx', 'date', 'date_collected', 'isUpgradedUser']<concatenate> | del df_onehot,bureau_final_median,previous_application_merge,previous_application_median
del POS_CASH_balance_median,credit_card_balance_median,installments_payments_median,installments_payments_merge
gc.collect() | Home Credit Default Risk |
1,056,158 | data = pd.concat([train,test],ignore_index=True)
data = data.drop(dropped_column, axis=1)<drop_column> | df_train=df_total[df_total.TARGET!=-999]
| Home Credit Default Risk |
1,056,158 | data = data.drop(['average_transfer_trx'], axis=1)<drop_column> | df_test=df_total[df_total.TARGET==-999]
| Home Credit Default Risk |
1,056,158 | data.loc[data.isActive.isnull()]
data = data.dropna(subset=["isActive"])<feature_engineering> | test=df_test.drop(columns=["SK_ID_CURR",'TARGET'],axis=1)
test.shape | Home Credit Default Risk |
1,056,158 | data['premium'] = data['premium'].fillna(data['premium'].mode()[0])<feature_engineering> | y=df_train['TARGET'].values
y | Home Credit Default Risk |
1,056,158 | for column in data.columns:
    if column != "isChurned":
        data[column] = data[column].fillna(data[column].median())<categorify> | train=df_train.drop(columns=["SK_ID_CURR",'TARGET'],axis=1).values
train.shape | Home Credit Default Risk |
1,056,158 | categorical_features = ['premium', 'super', 'pinEnabled']
le = LabelEncoder()
for col in categorical_features:
    data[col] = le.fit_transform(list(data[col].values))<set_options> | del df_train,df_test,df_total
gc.collect() | Home Credit Default Risk |
1,056,158 | Q3 = data.quantile(0.85)<feature_engineering> | gc.collect() | Home Credit Default Risk |
1,056,158 | numerik_col = ['average_recharge_trx','average_topup_trx','max_recharge_trx','max_topup_trx',
'min_recharge_trx','min_topup_trx','num_recharge_trx','num_topup_trx','num_transaction',
'random_number','total_transaction']
<drop_column> | from sklearn.model_selection import train_test_split | Home Credit Default Risk |
1,056,158 | data['num_transaction_plus_num_recharge'] = data['num_transaction'] + data['num_recharge_trx']
data.drop(['num_transaction', 'num_recharge_trx'], axis=1, inplace = True)
<split> | from sklearn.model_selection import train_test_split | Home Credit Default Risk |
1,056,158 | train = data[~data.isChurned.isnull()]
test = data[data.isChurned.isnull()]
numerik_col = ['max_recharge_trx','average_recharge_trx',
               'average_topup_trx', 'max_topup_trx',
               'min_recharge_trx','min_topup_trx','num_topup_trx',
               'random_number','total_transaction',
               ]
for col in numerik_col:
    train[col] = (train[col] - train[col].min()) / (train[col].max() - train[col].min())
    test[col] = (test[col] - test[col].min()) / (test[col].max() - test[col].min())
<count_duplicates> | X_train,X_test,y_train,y_test=train_test_split(train,y,test_size=0.2) | Home Credit Default Risk |
1,056,158 | train.duplicated().value_counts()
<remove_duplicates> | del train
gc.collect() | Home Credit Default Risk |
1,056,158 | train.drop_duplicates(keep = 'first', inplace = True)
<set_options> | import lightgbm | Home Credit Default Risk |
1,056,158 | train.corr().style.background_gradient(cmap='coolwarm')<count_values> | train_data=lightgbm.Dataset(X_train,label=y_train)
valid_data=lightgbm.Dataset(X_test,label=y_test) | Home Credit Default Risk |
1,056,158 | min_cor = ['isActive', 'isVerifiedPhone', 'blocked', 'super', 'random_number']
for col in min_cor:
    print('====== ', col, " ======")
    print(train[col].value_counts())
<drop_column> | params = {'boosting_type': 'gbdt',
'max_depth' : 10,
'objective': 'binary',
'nthread': 5,
'num_leaves': 64,
'learning_rate': 0.1,
'max_bin': 512,
'subsample_for_bin': 200,
'subsample': 1,
'subsample_freq': 1,
'colsample_bytree': 0.8,
'reg_alpha': 5,
'reg_lambda': 10,
'min_split_gain': 0.005,
'min_child_weight': 1,
'min_child_samples': 5,
'scale_pos_weight': 1,
'num_class' : 1,
'metric' : 'auc'
} | Home Credit Default Risk |
1,056,158 | drop_from_cor = ['isActive', 'isVerifiedPhone', 'blocked', 'super', 'random_number']
train.drop(drop_from_cor, axis=1, inplace=True)
test.drop(drop_from_cor, axis=1, inplace=True)<import_modules> | lgbm = lightgbm.train(params,
                      train_data,
                      25000,
                      valid_sets=valid_data,
                      early_stopping_rounds=80,
                      verbose_eval=10
) | Home Credit Default Risk |
1,056,158 | from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import OneClassSVM
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import RadiusNeighborsClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.multioutput import ClassifierChain
from sklearn.multioutput import MultiOutputClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import RidgeClassifierCV
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.calibration import CalibratedClassifierCV
from sklearn.naive_bayes import GaussianNB
from sklearn.semi_supervised import LabelPropagation
from sklearn.semi_supervised import LabelSpreading
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegressionCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import NearestCentroid
from sklearn.svm import NuSVC
from sklearn.linear_model import Perceptron
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.svm import SVC
import xgboost as xgb
from xgboost import XGBClassifier
<prepare_x_and_y> | predictions_lgbm_prob = lgbm.predict(test.values) | Home Credit Default Risk |
1,056,158 | Y = train["isChurned"]
X = train.drop(["isChurned"], axis=1)<split> | sub=pd.read_csv('../input/sample_submission.csv') | Home Credit Default Risk |
1,056,158 | random_state = 1
X_train, X_valid, y_train, y_valid = train_test_split(X, Y, test_size=0.2, random_state=random_state)<choose_model_class> | sub.TARGET=predictions_lgbm_prob | Home Credit Default Risk |
1,056,158 | def get_kfold():
    return KFold(n_splits=5, shuffle=True, random_state=1)<choose_model_class> | sub.to_csv('sub.csv', index=False) | Home Credit Default Risk |
1,046,068 | all_model = [RandomForestClassifier(), ExtraTreeClassifier(), LogisticRegression(), RidgeClassifier(),
             DecisionTreeClassifier(), KNeighborsClassifier(), PassiveAggressiveClassifier(),
             ]<choose_model_class> | df_application = pd.read_csv('../input/application_train.csv')
df_application_test = pd.read_csv('../input/application_test.csv')
df_application.head() | Home Credit Default Risk |
1,046,068 | params = {'loss_function':'Logloss',
'eval_metric':'F1',
'iterations' : 1000,
'learning_rate': 0.01,
'verbose': 1000,
'random_seed': random_state
}
cbc = CatBoostClassifier(**params)<prepare_x_and_y> | df_application['Source'] = 'Train'
df_application_test['Source'] = 'Test'
df = pd.concat((df_application, df_application_test), axis=0, sort=False)
cat_cols = [col for col in df.columns if (df[col].dtype == object) & (col != 'Source')]
le = preprocessing.LabelEncoder()
for col in cat_cols:
    df[col] = le.fit_transform(df[col].fillna("Missing"))
df.head()
df_train = df[df['Source'] == "Train"].drop('Source', axis=1)
df_test = df[df['Source'] == "Test"].drop('Source', axis=1)
del df | Home Credit Default Risk |
1,046,068 | data_dmatrix = xgb.DMatrix(data=X, label=Y)<train_on_grid> | df_bureau = pd.read_csv("../input/bureau.csv")
df_bureau_balance = pd.read_csv("../input/bureau_balance.csv")
df_bureau_balance["MONTHS_BALANCE"] = np.abs(df_bureau_balance["MONTHS_BALANCE"])
df_bureau_balance["Period"] = np.where((df_bureau_balance["MONTHS_BALANCE"] < 7), "short", np.where((df_bureau_balance["MONTHS_BALANCE"] < 13), "medium", "long"))
df_bureau_balance["Period_status"] = df_bureau_balance["Period"].astype(str) + "_" + df_bureau_balance["STATUS"]
df_bureau_balance.head(5)
| Home Credit Default Risk |
1,046,068 | params = {"objective":"binary:logistic",'colsample_bytree': 0.3,'learning_rate': 0.1,
'max_depth': 10, 'alpha': 10}
<compute_train_metric> | df_bureau_balance = df_bureau_balance.groupby(["SK_ID_BUREAU","Period_status"])\
.agg({"MONTHS_BALANCE" : ["count","min","max","mean"]})\
.reset_index()
df_bureau_balance.columns = [''.join(col).strip() for col in df_bureau_balance.columns.values]
df_bureau_balance.head() | Home Credit Default Risk |