kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
1,457,238 | for col in train_data.columns:
    msg = 'column: {:>10}\t Percent of NaN value: {:.2f}%'.format(col, 100 * (train_data[col].isnull().sum() / train_data[col].shape[0]))
    print(msg)<count_missing_values> | credit_NAME_CONTRACT_STATUS_Signed_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Signed'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Signed': 'credit_NAME_CONTRACT_STATUS_Signed_mean'})
dataset = dataset.merge(credit_NAME_CONTRACT_STATUS_Signed_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_NAME_CONTRACT_STATUS_Signed_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | for col in test_data.columns:
    msg = 'column: {:>10}\t Percent of NaN value: {:.2f}%'.format(col, 100 * (test_data[col].isnull().sum() / test_data[col].shape[0]))
    print(msg)<groupby> | gc.enable()
del credit, credit_stats_by_prev
gc.collect() | Home Credit Default Risk |
1,457,238 | train_data[['Pclass', 'Survived']].groupby(['Pclass'], as_index=True ).count()
<groupby> | install = pd.read_csv('../input/installments_payments.csv')
install.head() | Home Credit Default Risk |
1,457,238 | train_data[['Pclass', 'Survived']].groupby(['Pclass'], as_index=True ).sum()
<sort_values> | install['DAYS_DIFF'] = install['DAYS_INSTALMENT'] - install['DAYS_ENTRY_PAYMENT']
install['AMT_DIFF'] = install['AMT_INSTALMENT'] - install['AMT_PAYMENT']
install.head() | Home Credit Default Risk |
1,457,238 | train_data[['Sex', 'Survived']].groupby(['Sex'], as_index=False ).mean().sort_values(by='Survived', ascending=False)
<feature_engineering> | install_stats_by_prev = install[['SK_ID_PREV', 'SK_ID_CURR']] | Home Credit Default Risk |
1,457,238 | train_data['FamilySize'] = train_data['SibSp'] + train_data['Parch'] + 1
test_data['FamilySize'] = test_data['SibSp'] + test_data['Parch'] + 1<feature_engineering> | install_NUM_INSTALMENT_VERSION_count = install.groupby('SK_ID_PREV', as_index=False)['NUM_INSTALMENT_VERSION'].count().rename(columns = {'NUM_INSTALMENT_VERSION': 'install_NUM_INSTALMENT_VERSION_count'})
install_NUM_INSTALMENT_VERSION_max = install.groupby('SK_ID_PREV', as_index=False)['NUM_INSTALMENT_VERSION'].max().rename(columns = {'NUM_INSTALMENT_VERSION': 'install_NUM_INSTALMENT_VERSION_max'})
install_stats_by_prev = install_stats_by_prev.merge(install_NUM_INSTALMENT_VERSION_count, on = 'SK_ID_PREV', how = 'left')
install_stats_by_prev = install_stats_by_prev.merge(install_NUM_INSTALMENT_VERSION_max, on = 'SK_ID_PREV', how = 'left' ) | Home Credit Default Risk |
1,457,238 | test_data.loc[test_data.Fare.isnull(), 'Fare'] = test_data['Fare'].mean()
train_data['Fare'] = train_data['Fare'].map(lambda i: np.log(i) if i > 0 else 0)
test_data['Fare'] = test_data['Fare'].map(lambda i: np.log(i) if i > 0 else 0)<categorify> | gc.enable()
del install_NUM_INSTALMENT_VERSION_count, install_NUM_INSTALMENT_VERSION_max
gc.collect() | Home Credit Default Risk |
1,457,238 | def get_one_hot(array):
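    # Hand-rolled one-hot encoding: indicator columns for Pclass (1/2/3), Sex and Embarked (C/Q/S),
    # with SibSp/Parch/Fare passed through unchanged, giving an (n_samples, 11) float32 matrix.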
return np.array(( array['Pclass'] == 1, array['Pclass'] == 2,
array['Pclass'] == 3, array['Sex'] == 'male',
array['Sex'] == 'female', array['SibSp'],
array['Parch'], array['Fare'],
array['Embarked'] == 'C', array['Embarked'] == 'Q',
array['Embarked'] == 'S')).swapaxes(0, 1 ).astype('float32' )<drop_column> | install_DAYS_INSTALMENT_mean = install.groupby('SK_ID_PREV', as_index=False)['DAYS_INSTALMENT'].mean().rename(columns = {'DAYS_INSTALMENT': 'install_DAYS_INSTALMENT_mean'})
install_stats_by_prev = install_stats_by_prev.merge(install_DAYS_INSTALMENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del install_DAYS_INSTALMENT_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | x_train = train_data[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
x_train.head()<categorify> | install_DAYS_ENTRY_PAYMENT_mean = install.groupby('SK_ID_PREV', as_index=False)['DAYS_ENTRY_PAYMENT'].mean().rename(columns = {'DAYS_ENTRY_PAYMENT': 'install_DAYS_ENTRY_PAYMENT_mean'})
install_stats_by_prev = install_stats_by_prev.merge(install_DAYS_ENTRY_PAYMENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del install_DAYS_ENTRY_PAYMENT_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | x_train = get_one_hot(x_train)
x_train[:10]<prepare_x_and_y> | install_AMT_INSTALMENT_mean = install.groupby('SK_ID_PREV', as_index=False)['AMT_INSTALMENT'].mean().rename(columns = {'AMT_INSTALMENT': 'install_AMT_INSTALMENT_mean'})
install_stats_by_prev = install_stats_by_prev.merge(install_AMT_INSTALMENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del install_AMT_INSTALMENT_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | y_train = np.array(train_data['Survived'])
y_train[:10]<categorify> | install_AMT_PAYMENT_mean = install.groupby('SK_ID_PREV', as_index=False)['AMT_PAYMENT'].mean().rename(columns = {'AMT_PAYMENT': 'install_AMT_PAYMENT_mean'})
install_stats_by_prev = install_stats_by_prev.merge(install_AMT_PAYMENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del install_AMT_PAYMENT_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | x_test = get_one_hot(x_test)
x_test[:10]<drop_column> | install_DAYS_DIFF_mean = install.groupby('SK_ID_PREV', as_index=False)['DAYS_DIFF'].mean().rename(columns = {'DAYS_DIFF': 'install_DAYS_DIFF_mean'})
install_DAYS_DIFF_max = install.groupby('SK_ID_PREV', as_index=False)['DAYS_DIFF'].max().rename(columns = {'DAYS_DIFF': 'install_DAYS_DIFF_max'})
install_DAYS_DIFF_min = install.groupby('SK_ID_PREV', as_index=False)['DAYS_DIFF'].min().rename(columns = {'DAYS_DIFF': 'install_DAYS_DIFF_min'})
install_stats_by_prev = install_stats_by_prev.merge(install_DAYS_DIFF_mean, on = 'SK_ID_PREV', how = 'left')
install_stats_by_prev = install_stats_by_prev.merge(install_DAYS_DIFF_max, on = 'SK_ID_PREV', how = 'left')
install_stats_by_prev = install_stats_by_prev.merge(install_DAYS_DIFF_min, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del install_DAYS_DIFF_mean, install_DAYS_DIFF_max, install_DAYS_DIFF_min
gc.collect() | Home Credit Default Risk |
1,457,238 | train_data.drop(['PassengerId','Name','SibSp','Parch','Ticket','Cabin'], axis=1, inplace=True)
test_data.drop(['PassengerId','Name','SibSp','Parch','Ticket','Cabin'], axis=1, inplace=True )<import_modules> | install_AMT_DIFF_mean = install.groupby('SK_ID_PREV', as_index=False)['AMT_DIFF'].mean().rename(columns = {'AMT_DIFF': 'install_AMT_DIFF_mean'})
install_AMT_DIFF_max = install.groupby('SK_ID_PREV', as_index=False)['AMT_DIFF'].max().rename(columns = {'AMT_DIFF': 'install_AMT_DIFF_max'})
install_AMT_DIFF_min = install.groupby('SK_ID_PREV', as_index=False)['AMT_DIFF'].min().rename(columns = {'AMT_DIFF': 'install_AMT_DIFF_min'})
install_stats_by_prev = install_stats_by_prev.merge(install_AMT_DIFF_mean, on = 'SK_ID_PREV', how = 'left')
install_stats_by_prev = install_stats_by_prev.merge(install_AMT_DIFF_max, on = 'SK_ID_PREV', how = 'left')
install_stats_by_prev = install_stats_by_prev.merge(install_AMT_DIFF_min, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del install_AMT_DIFF_mean, install_AMT_DIFF_max, install_AMT_DIFF_min
gc.collect() | Home Credit Default Risk |
1,457,238 | from mlxtend.classifier import StackingCVClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer<normalization> | install_NUM_INSTALMENT_VERSION_count_mean = install_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['install_NUM_INSTALMENT_VERSION_count'].mean().rename(columns = {'install_NUM_INSTALMENT_VERSION_count': 'install_NUM_INSTALMENT_VERSION_count_mean'})
dataset = dataset.merge(install_NUM_INSTALMENT_VERSION_count_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del install_NUM_INSTALMENT_VERSION_count_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp = imp.fit(x_train)
x_train_imp = imp.transform(x_train )<choose_model_class> | install_NUM_INSTALMENT_VERSION_max_max = install_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['install_NUM_INSTALMENT_VERSION_max'].max().rename(columns = {'install_NUM_INSTALMENT_VERSION_max': 'install_NUM_INSTALMENT_VERSION_max_max'})
dataset = dataset.merge(install_NUM_INSTALMENT_VERSION_max_max, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del install_NUM_INSTALMENT_VERSION_max_max
gc.collect() | Home Credit Default Risk |
1,457,238 | clf1 = RandomForestClassifier()
clf2 = GradientBoostingClassifier()
lr = LogisticRegression()
sclf = StackingCVClassifier(classifiers=[clf1, clf2], meta_classifier=lr )<define_search_space> | install_DAYS_INSTALMENT_mean_mean = install_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['install_DAYS_INSTALMENT_mean'].mean().rename(columns = {'install_DAYS_INSTALMENT_mean': 'install_DAYS_INSTALMENT_mean_mean'})
dataset = dataset.merge(install_DAYS_INSTALMENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del install_DAYS_INSTALMENT_mean_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | param_test = {'randomforestclassifier__n_estimators': [10, 120],
'randomforestclassifier__max_depth': [2, 15],
'gradientboostingclassifier__n_estimators': [10, 120],
'gradientboostingclassifier__max_depth': [2, 15],
'gradientboostingclassifier__learning_rate' : [0.01, 0.1],
'meta_classifier__C': [0.1, 10.0]}<train_model> | install_DAYS_ENTRY_PAYMENT_mean_mean = install_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['install_DAYS_ENTRY_PAYMENT_mean'].mean().rename(columns = {'install_DAYS_ENTRY_PAYMENT_mean': 'install_DAYS_ENTRY_PAYMENT_mean_mean'})
dataset = dataset.merge(install_DAYS_ENTRY_PAYMENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del install_DAYS_ENTRY_PAYMENT_mean_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | sclf.fit(x_train_imp, y_train )<normalization> | install_AMT_INSTALMENT_mean_mean = install_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['install_AMT_INSTALMENT_mean'].mean().rename(columns = {'install_AMT_INSTALMENT_mean': 'install_AMT_INSTALMENT_mean_mean'})
dataset = dataset.merge(install_AMT_INSTALMENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del install_AMT_INSTALMENT_mean_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | X_test_imp = imp.transform(x_test )<load_from_csv> | install_AMT_PAYMENT_mean_mean = install_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['install_AMT_PAYMENT_mean'].mean().rename(columns = {'install_AMT_PAYMENT_mean': 'install_AMT_PAYMENT_mean_mean'})
dataset = dataset.merge(install_AMT_PAYMENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del install_AMT_PAYMENT_mean_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | submission = pd.read_csv('../input/sample_submission.csv')<prepare_output> | install_DAYS_DIFF_mean_mean = install_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['install_DAYS_DIFF_mean'].mean().rename(columns = {'install_DAYS_DIFF_mean': 'install_DAYS_DIFF_mean_mean'})
dataset = dataset.merge(install_DAYS_DIFF_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del install_DAYS_DIFF_mean_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | submission = pd.DataFrame(data, columns=['PassengerId', 'Survived'])
submission.set_index('PassengerId', inplace=True)
submission.head()<predict_on_test> | install_DAYS_DIFF_max_mean = install_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['install_DAYS_DIFF_max'].mean().rename(columns = {'install_DAYS_DIFF_max': 'install_DAYS_DIFF_max_mean'})
dataset = dataset.merge(install_DAYS_DIFF_max_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del install_DAYS_DIFF_max_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | prediction = sclf.predict(x_test)
submission['Survived'] = prediction
prediction<save_to_csv> | install_DAYS_DIFF_min_mean = install_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['install_DAYS_DIFF_min'].mean().rename(columns = {'install_DAYS_DIFF_min': 'install_DAYS_DIFF_min_mean'})
dataset = dataset.merge(install_DAYS_DIFF_min_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del install_DAYS_DIFF_min_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | submission.to_csv('./submission.csv',index= False )<import_modules> | install_AMT_DIFF_mean_mean = install_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['install_AMT_DIFF_mean'].mean().rename(columns = {'install_AMT_DIFF_mean': 'install_AMT_DIFF_mean_mean'})
dataset = dataset.merge(install_AMT_DIFF_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del install_AMT_DIFF_mean_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | import graphviz
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import tree
from sklearn.model_selection import cross_val_score, GridSearchCV<feature_engineering> | install_AMT_DIFF_max_mean = install_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['install_AMT_DIFF_max'].mean().rename(columns = {'install_AMT_DIFF_max': 'install_AMT_DIFF_max_mean'})
dataset = dataset.merge(install_AMT_DIFF_max_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del install_AMT_DIFF_max_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | train = pd.read_csv('../input/train.csv').set_index('PassengerId')
test = pd.read_csv('../input/test.csv').set_index('PassengerId')
df = pd.concat([train, test], axis=0, sort=False)
df['Title'] = df.Name.str.split(',' ).str[1].str.split('.' ).str[0].str.strip()
df['IsWomanOrChild'] =(( df.Title == 'Master')|(df.Sex == 'female'))
df['LastName'] = df.Name.str.split(',' ).str[0]
family = df.groupby(df.LastName ).Survived
df['FamilyTotalCount'] = family.transform(lambda s: s[df.IsWomanOrChild].fillna(0 ).count())
df['FamilyTotalCount'] = df.mask(df.IsWomanOrChild, df.FamilyTotalCount - 1, axis=0)
df['FamilySurvivedCount'] = family.transform(lambda s: s[df.IsWomanOrChild].fillna(0 ).sum())
df['FamilySurvivedCount'] = df.mask(df.IsWomanOrChild, df.FamilySurvivedCount - df.Survived.fillna(0), axis=0)
df['FamilySurvivalRate'] =(df.FamilySurvivedCount / df.FamilyTotalCount.replace(0, np.nan))
df['IsSingleTraveler'] = df.FamilyTotalCount == 0<groupby> | install_AMT_DIFF_min_mean = install_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['install_AMT_DIFF_min'].mean().rename(columns = {'install_AMT_DIFF_min': 'install_AMT_DIFF_min_mean'})
dataset = dataset.merge(install_AMT_DIFF_min_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del install_AMT_DIFF_min_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | name = df.groupby(df.LastName ).Ticket
name.value_counts()<feature_engineering> | gc.enable()
del install, install_stats_by_prev
gc.collect() | Home Credit Default Risk |
1,457,238 | df['SingleTraveler'] = 3
ticket_list_single = []
ticket_list_family = []
ticket_list_null = []
for ticket_id in list(df['Ticket'].unique()):
    count = df[df['Ticket']==ticket_id].count()[0]
    if count > 1:
ticket_list_family.append(ticket_id)
else:
ticket_list_single.append(ticket_id)
def tune_SingleTraveler(df):
    for ticket in ticket_list_single:
        df.loc[df.Ticket == ticket, "SingleTraveler"] = True
    for ticket in ticket_list_family:
        df.loc[df.Ticket == ticket, "SingleTraveler"] = False
tune_SingleTraveler(df)
<count_values> | cash = pd.read_csv('../input/POS_CASH_balance.csv')
cash.head() | Home Credit Default Risk |
1,457,238 | names = []
for name in list(df['LastName'].unique()):
    count = df[df['LastName']==name].count()[0]
    if count > 1:
        names.append(name)
for name in names:
    df.loc[df.LastName == name, "SingleTraveler"] = False
print(df['SingleTraveler'].sum())<groupby> | cash_stats_by_prev = cash[['SK_ID_PREV', 'SK_ID_CURR']] | Home Credit Default Risk |
1,457,238 | df['Family_freq'] = df.groupby('LastName')['LastName'].transform('count' )<count_missing_values> | cash_MONTHS_BALANCE_count = cash.groupby('SK_ID_PREV', as_index=False)['MONTHS_BALANCE'].count().rename(columns = {'MONTHS_BALANCE': 'cash_MONTHS_BALANCE_count'})
cash_MONTHS_BALANCE_mean = cash.groupby('SK_ID_PREV', as_index=False)['MONTHS_BALANCE'].mean().rename(columns = {'MONTHS_BALANCE': 'cash_MONTHS_BALANCE_mean'})
cash_stats_by_prev = cash_stats_by_prev.merge(cash_MONTHS_BALANCE_count, on = 'SK_ID_PREV', how = 'left')
cash_stats_by_prev = cash_stats_by_prev.merge(cash_MONTHS_BALANCE_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del cash_MONTHS_BALANCE_count, cash_MONTHS_BALANCE_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | df['Sex'].isnull().sum()<feature_engineering> | cash_CNT_INSTALMENT_mean = cash.groupby('SK_ID_PREV', as_index=False)['CNT_INSTALMENT'].mean().rename(columns = {'CNT_INSTALMENT': 'cash_CNT_INSTALMENT_mean'})
cash_stats_by_prev = cash_stats_by_prev.merge(cash_CNT_INSTALMENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del cash_CNT_INSTALMENT_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | df['All'] =(( df.Sex == 'male')|(df.Sex == 'female'))
df['LastName'] = df.Name.str.split(',' ).str[0]
family = df.groupby(df.LastName ).Survived
df['FamilyTotalCountM'] = family.transform(lambda s: s[df.All].fillna(0 ).count())
df['FamilyTotalCountM'] = df.mask(df.All, df.FamilyTotalCountM - 1, axis=0)
df['FamilySurvivedCountM'] = family.transform(lambda s: s[df.All].fillna(0 ).sum())
df['FamilySurvivedCountM'] = df.mask(df.All, df.FamilySurvivedCountM - df.Survived.fillna(0), axis=0)
df['FamilySurvivalRateM'] =(df.FamilySurvivedCountM / df.FamilyTotalCountM.replace(0, np.nan))<count_missing_values> | cash_CNT_INSTALMENT_FUTURE_mean = cash.groupby('SK_ID_PREV', as_index=False)['CNT_INSTALMENT_FUTURE'].mean().rename(columns = {'CNT_INSTALMENT_FUTURE': 'cash_CNT_INSTALMENT_FUTURE_mean'})
cash_stats_by_prev = cash_stats_by_prev.merge(cash_CNT_INSTALMENT_FUTURE_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del cash_CNT_INSTALMENT_FUTURE_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | df['All'].isnull().sum()<feature_engineering> | cash_SK_DPD_mean = cash.groupby('SK_ID_PREV', as_index=False)['SK_DPD'].mean().rename(columns = {'SK_DPD': 'cash_SK_DPD_mean'})
cash_stats_by_prev = cash_stats_by_prev.merge(cash_SK_DPD_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del cash_SK_DPD_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | _ = df.rename({'Cabin':'Deck'},axis=1,inplace=True)
df['Deck'] = df['Deck'].fillna('N')
len(df['Deck'] )<feature_engineering> | cash_SK_DPD_DEF_mean = cash.groupby('SK_ID_PREV', as_index=False)['SK_DPD_DEF'].mean().rename(columns = {'SK_DPD_DEF': 'cash_SK_DPD_DEF_mean'})
cash_stats_by_prev = cash_stats_by_prev.merge(cash_SK_DPD_DEF_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del cash_SK_DPD_DEF_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | def cabin_to_deck(row):
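    # The deck is the first character of the cabin string (e.g. 'C85' -> 'C'); missing cabins were filled with 'N' above.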
return row['Deck'][0]
df['Deck'] = df.apply(cabin_to_deck,axis=1 )<count_unique_values> | cash_cats = pd.get_dummies(cash.select_dtypes('object'))
cash_cats['SK_ID_PREV'] = cash['SK_ID_PREV']
cash_cats.head() | Home Credit Default Risk |
1,457,238 | ticket_list = []
for ticket_id in list(df['Ticket'].unique()):
count = df[df['Ticket']==ticket_id].count() [0]
decks = df[df['Ticket']==ticket_id]['Deck']
empty_decks =(decks=='N' ).sum()
if(count > 1)and(empty_decks > 0)and(empty_decks < len(decks)) :
ticket_list.append(ticket_id)
print(ticket_list )<filter> | cash_cats_grouped = cash_cats.groupby('SK_ID_PREV' ).agg('sum')
cash_cats_grouped.head() | Home Credit Default Risk |
1,457,238 | for ticket in ticket_list:
display(df[df['Ticket']==ticket] )<feature_engineering> | cash_stats_by_prev = cash_stats_by_prev.merge(cash_cats_grouped, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del cash_cats_grouped, cash_cats
gc.collect() | Home Credit Default Risk |
df.at[533, 'Deck'] = df.loc[128, 'Deck']
df.at[1308, 'Deck'] = df.loc[128, 'Deck']
df.at[258, 'Deck'] = df.loc[679, 'Deck']
df.at[373, 'Deck'] = 'C'
df.at[290, 'Deck'] = df.loc[741, 'Deck']
df.at[708, 'Deck'] = df.loc[297, 'Deck']
df.at[1032, 'Deck'] = df.loc[297, 'Deck']
df.at[306, 'Deck'] = 'C'
df.at[1266, 'Deck'] = df.loc[1033, 'Deck']
df.at[856, 'Deck'] = df.loc[318, 'Deck']
df.at[1108, 'Deck'] = df.loc[318, 'Deck']
df.at[380, 'Deck'] = 'C'
df.at[557, 'Deck'] = 'C'
df.at[537, 'Deck'] = 'C'
df.at[1215, 'Deck'] = 'E'
df.at[841, 'Deck'] = df.loc[772, 'Deck']
<drop_column> | cash_MONTHS_BALANCE_count_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['cash_MONTHS_BALANCE_count'].mean().rename(columns = {'cash_MONTHS_BALANCE_count': 'cash_MONTHS_BALANCE_count_mean'})
dataset = dataset.merge(cash_MONTHS_BALANCE_count_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_MONTHS_BALANCE_count_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | for i in range(3):
if 'N' in decks_by_class[i]:
decks_by_class[i].remove('N')
if 'T' in decks_by_class[i]:
decks_by_class[i].remove('T' )<define_variables> | cash_MONTHS_BALANCE_mean_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['cash_MONTHS_BALANCE_mean'].mean().rename(columns = {'cash_MONTHS_BALANCE_mean': 'cash_MONTHS_BALANCE_mean_mean'})
dataset = dataset.merge(cash_MONTHS_BALANCE_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_MONTHS_BALANCE_mean_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | weights_by_class = [[],[],[]]
for i,deck_list in enumerate(decks_by_class):
for deck in deck_list:
if i == 0:
            class_total = df[(df['Deck']!='N')&(df['Pclass']==i+1)].count()[0]-1
        else:
            class_total = df[(df['Deck']!='N')&(df['Pclass']==i+1)].count()[0]
        deck_total = df[(df['Deck']==deck)&(df['Pclass']==i+1)].count()[0]
weights_by_class[i].append(deck_total/class_total)
print(f'Pclass = {i+1} weights:',np.round(weights_by_class[i],3))<define_variables> | cash_CNT_INSTALMENT_mean_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['cash_CNT_INSTALMENT_mean'].mean().rename(columns = {'cash_CNT_INSTALMENT_mean': 'cash_CNT_INSTALMENT_mean_mean'})
dataset = dataset.merge(cash_CNT_INSTALMENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_CNT_INSTALMENT_mean_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | ticket_dict = {}<categorify> | cash_CNT_INSTALMENT_FUTURE_mean_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['cash_CNT_INSTALMENT_FUTURE_mean'].mean().rename(columns = {'cash_CNT_INSTALMENT_FUTURE_mean': 'cash_CNT_INSTALMENT_FUTURE_mean_mean'})
dataset = dataset.merge(cash_CNT_INSTALMENT_FUTURE_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_CNT_INSTALMENT_FUTURE_mean_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | def impute_deck(row):
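    # Impute missing decks by sampling with class-conditional deck frequencies as weights;
    # ticket_dict keeps passengers sharing a ticket on the same imputed deck.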
ticket = row['Ticket']
deck = row['Deck']
pclass = row['Pclass']
    if (deck == 'N') and (ticket not in ticket_dict):
        if pclass == 1:
            deck = np.random.choice(decks_by_class[0], size=1, p=weights_by_class[0])[0]
        elif pclass == 2:
            deck = np.random.choice(decks_by_class[1], size=1, p=weights_by_class[1])[0]
        elif pclass == 3:
            deck = np.random.choice(decks_by_class[2], size=1, p=weights_by_class[2])[0]
        ticket_dict[ticket] = deck
    elif (deck == 'N') and (ticket in ticket_dict):
        deck = ticket_dict[ticket]
return deck<feature_engineering> | cash_SK_DPD_mean_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['cash_SK_DPD_mean'].mean().rename(columns = {'cash_SK_DPD_mean': 'cash_SK_DPD_mean_mean'})
dataset = dataset.merge(cash_SK_DPD_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_SK_DPD_mean_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | df['Deck'] = df.apply(impute_deck,axis=1 )<categorify> | cash_SK_DPD_DEF_mean_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['cash_SK_DPD_DEF_mean'].mean().rename(columns = {'cash_SK_DPD_DEF_mean': 'cash_SK_DPD_DEF_mean_mean'})
dataset = dataset.merge(cash_SK_DPD_DEF_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_SK_DPD_DEF_mean_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | df['Deck'] = df['Deck'].map({'F':0,'C':1,'E':2,'G':3,'D':4,'A':5,
'B':6,'T':7} ).astype(int )<prepare_x_and_y> | cash_NAME_CONTRACT_STATUS_Active_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Active'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Active': 'cash_NAME_CONTRACT_STATUS_Active_mean'})
dataset = dataset.merge(cash_NAME_CONTRACT_STATUS_Active_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_NAME_CONTRACT_STATUS_Active_mean
gc.collect() | Home Credit Default Risk |
1,457,238 |
<feature_engineering> | cash_NAME_CONTRACT_STATUS_Amortized_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Amortized debt'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Amortized debt': 'cash_NAME_CONTRACT_STATUS_Amortized_mean'})
dataset = dataset.merge(cash_NAME_CONTRACT_STATUS_Amortized_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_NAME_CONTRACT_STATUS_Amortized_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | df.loc[df.Title == 'Master', "Sex"] = 'boy'
<prepare_x_and_y> | cash_NAME_CONTRACT_STATUS_Approved_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Approved'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Approved': 'cash_NAME_CONTRACT_STATUS_Approved_mean'})
dataset = dataset.merge(cash_NAME_CONTRACT_STATUS_Approved_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_NAME_CONTRACT_STATUS_Approved_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | x = pd.concat([
df.FamilySurvivalRate.fillna(0),
df.SingleTraveler,
df.Sex.replace({'male': 0, 'female': 1, 'boy': 2}),
df.Deck,
], axis=1)
train_x, test_x = x.loc[train.index], x.loc[test.index]
train_y = df.Survived.loc[train.index]<train_on_grid> | cash_NAME_CONTRACT_STATUS_Canceled_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Canceled'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Canceled': 'cash_NAME_CONTRACT_STATUS_Canceled_mean'})
dataset = dataset.merge(cash_NAME_CONTRACT_STATUS_Canceled_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_NAME_CONTRACT_STATUS_Canceled_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | clf_dt = tree.DecisionTreeClassifier()
grid = GridSearchCV(clf_dt, cv=5, param_grid={
'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5]})
grid.fit(train_x, train_y)
grid.best_params_<find_best_params> | cash_NAME_CONTRACT_STATUS_Completed_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Completed'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Completed': 'cash_NAME_CONTRACT_STATUS_Completed_mean'})
dataset = dataset.merge(cash_NAME_CONTRACT_STATUS_Completed_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_NAME_CONTRACT_STATUS_Completed_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | model_dt = grid.best_estimator_<compute_test_metric> | cash_NAME_CONTRACT_STATUS_Demand_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Demand'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Demand': 'cash_NAME_CONTRACT_STATUS_Demand_mean'})
dataset = dataset.merge(cash_NAME_CONTRACT_STATUS_Demand_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_NAME_CONTRACT_STATUS_Demand_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | model_dt.score(train_x, train_y )<save_to_csv> | cash_NAME_CONTRACT_STATUS_Returned_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Returned to the store'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Returned to the store': 'cash_NAME_CONTRACT_STATUS_Returned_mean'})
dataset = dataset.merge(cash_NAME_CONTRACT_STATUS_Returned_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_NAME_CONTRACT_STATUS_Returned_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | test_y = model_dt.predict(test_x ).astype(int)
pd.DataFrame({'Survived': test_y}, index=test.index)\
.reset_index() \
.to_csv(f'submission_dt.csv', index=False )<predict_on_test> | cash_NAME_CONTRACT_STATUS_Signed_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Signed'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Signed': 'cash_NAME_CONTRACT_STATUS_Signed_mean'})
dataset = dataset.merge(cash_NAME_CONTRACT_STATUS_Signed_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_NAME_CONTRACT_STATUS_Signed_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | preds = pd.DataFrame()
preds = x.loc[test.index]
preds['Pclass'] = test['Pclass']
preds['pred'] = model_dt.predict_proba(test_x)[:, 1]
preds = preds.drop(['FamilySurvivalRate','SingleTraveler','Sex', 'Deck'], axis=1)
preds.head()
<count_values> | cash_NAME_CONTRACT_STATUS_XNA_mean = cash_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_XNA'].mean().rename(columns = {'NAME_CONTRACT_STATUS_XNA': 'cash_NAME_CONTRACT_STATUS_XNA_mean'})
dataset = dataset.merge(cash_NAME_CONTRACT_STATUS_XNA_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del cash_NAME_CONTRACT_STATUS_XNA_mean
gc.collect() | Home Credit Default Risk |
1,457,238 | preds.groupby(['Pclass'] ).pred.value_counts()<data_type_conversions> | gc.enable()
del cash, cash_stats_by_prev
gc.collect() | Home Credit Default Risk |
1,457,238 | preds.loc[preds['Pclass'] == 3, 'pred'] = preds.loc[preds['Pclass'] == 3, 'pred'] -0.3
sub = []
sub = preds['pred'].values
sub = np.around(sub ).astype(int )<save_to_csv> | dataset.dtypes.value_counts() | Home Credit Default Risk |
1,457,238 | test_y = sub.astype(int)
pd.DataFrame({'Survived': test_y}, index=test.index)\
.reset_index() \
.to_csv(f'submission_dt_m0.3.csv', index=False )<save_to_csv> | dataset['bureau_DAYS_CREDIT_ENDDATE_max_outlier'] = dataset['bureau_DAYS_CREDIT_ENDDATE_max_outlier'].map({False:0, True:1})
dataset['bureau_DAYS_ENDDATE_FACT_mean_outlier'] = dataset['bureau_DAYS_ENDDATE_FACT_mean_outlier'].map({False:0, True:1} ) | Home Credit Default Risk |
1,457,238 | model_xgb = xgboost.XGBClassifier()
model_xgb.fit(train_x, train_y)
test_y = model_xgb.predict(test_x ).astype(int)
pd.DataFrame({'Survived': test_y}, index=test.index)\
.reset_index() \
.to_csv(f'submission_xgb.csv', index=False )<prepare_x_and_y> | y_temp = dataset[['TARGET']]
X_temp = dataset.drop(['TARGET'], axis=1)
X_big, X_small, y_big, y_small = train_test_split(X_temp, y_temp, test_size=0.2, random_state=1 ) | Home Credit Default Risk |
1,457,238 | X_train = train_x
Y_train = train_y<train_model> | upper_corr = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
upper_corr.head() | Home Credit Default Risk |
1,457,238 | def crossValidation(data,target, model, cv = 5):
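    # Hand-rolled K-fold cross-validation: refits the model on each training split and reports the mean accuracy over the held-out folds.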
ksplits = KFold(n_splits=cv)
errors = []
    for j, (indexTrain, indexTest) in enumerate(ksplits.split(data, target)):
print("This is fold ", j + 1, "of the cross validation")
x_train, y_train = data.iloc[indexTrain, :], target.iloc[indexTrain]
x_test, y_test = data.iloc[indexTest, :], target.iloc[indexTest]
print("Fitting the model")
model.fit(x_train, y_train)
predictions = model.predict(x_test)
errorFold = accuracy_score(y_test, predictions)
errors.append(errorFold)
print("The mean accuracy score over the folds is: ", np.mean(errors))
return<compute_train_metric> | drop_cols = [column for column in upper_corr.columns if any(upper_corr[column] > 0.9)]
print('Columns to remove: ', len(drop_cols)) | Home Credit Default Risk |
1,457,238 | modelXGboost_tuned = xgboost.XGBClassifier(base_score = 0.5,
colsample_bytree= 0.65,
gamma= 0,
learning_rate= 0.5,
max_depth= 3,
min_child_weight= 1,
n_estimators= 120,
scale_pos_weight= 1)
crossValidation(X_train, Y_train, modelXGboost_tuned )<compute_train_metric> | dataset_missing =(dataset.isnull().sum() / len(dataset)).sort_values(ascending = False)
dataset_missing.head(10 ) | Home Credit Default Risk |
1,457,238 | modelK_tuned = KNeighborsClassifier(algorithm='ball_tree', n_neighbors=5, p=1, weights='distance')
crossValidation(X_train, Y_train, modelK_tuned )<compute_train_metric> | dataset_missing = dataset_missing.index[dataset_missing > 0.75]
print('Columns with more than 75% missing values: ', len(dataset_missing)) | Home Credit Default Risk |
1,457,238 | modelForest_tuned = RandomForestClassifier(max_depth =5, n_estimators=25)
crossValidation(X_train, Y_train, modelForest_tuned )<compute_train_metric> | train = dataset[:train_len]
x_test = dataset[train_len:]
train_ids = train['SK_ID_CURR']
test_ids = x_test['SK_ID_CURR']
train.drop(columns=['SK_ID_CURR'], axis = 1, inplace=True)
x_test.drop(columns=['TARGET', 'SK_ID_CURR'], axis = 1, inplace=True ) | Home Credit Default Risk |
1,457,238 | modelLogistic = LogisticRegression(solver="liblinear")
crossValidation(X_train, Y_train, modelLogistic )<compute_train_metric> | train['TARGET'] = train['TARGET'].astype(int)
y_train = train['TARGET']
x_train = train.drop(columns=['TARGET'], axis = 1 ) | Home Credit Default Risk |
1,457,238 | modelXGboost = xgboost.XGBClassifier()
crossValidation(X_train, Y_train, modelXGboost )<compute_train_metric> | feature_imp = np.zeros(x_train.shape[1] ) | Home Credit Default Risk |
1,457,238 | modelDiscriminant = LinearDiscriminantAnalysis()
crossValidation(X_train, Y_train, modelDiscriminant )<compute_train_metric> | model = lgb.LGBMClassifier(objective='binary', boosting_type='goss', n_estimators=10000, class_weight='balanced' ) | Home Credit Default Risk |
1,457,238 | modelForest = RandomForestClassifier()
crossValidation(X_train, Y_train, modelForest )<compute_train_metric> | for i in range(2):
train_x1, train_x2, train_y1, train_y2 = train_test_split(x_train, y_train, test_size = 0.25, random_state = i)
model.fit(train_x1, train_y1, early_stopping_rounds=100, eval_set = [(train_x2, train_y2)], eval_metric = 'auc', verbose = 200)
feature_imp += model.feature_importances_ | Home Credit Default Risk |
1,457,238 | modelK = KNeighborsClassifier()
crossValidation(X_train, Y_train, modelK )<compute_test_metric> | zero_imp = list(feature_imp[feature_imp['importance'] == 0.0]['feature'])
print('count of features with 0 importance: ', len(zero_imp))
feature_imp.tail(10 ) | Home Credit Default Risk |
1,457,238 | crossValidation(X_train, Y_train, model_dt )<compute_train_metric> | x_train = x_train.drop(columns = zero_imp)
x_test = x_test.drop(columns = zero_imp ) | Home Credit Default Risk |
1,457,238 | def crossValMixed(X, y, model,cv = 5):
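    # CV loop for a stacking ensemble; model.stackingActive is assumed to fit the base learners
    # and meta-learner on each fold and return predictions for the held-out fold.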
n_folds = KFold(n_splits = cv,shuffle=True)
counter = 0
errorMetr = []
for indexTrain, indexTest in n_folds.split(X,y):
print("This is fold: ", counter, "of the cross validation")
X_train, y_train = X.iloc[indexTrain, :], y.iloc[indexTrain]
X_test, y_test = X.iloc[indexTest, :], y.iloc[indexTest]
print("Fitting the model")
predictions = model.stackingActive(X_train, y_train, X_test)
print("The metric in the fold ", counter, "is: ", accuracy_score(y_test,predictions))
counter += 1
errorMetr.append(accuracy_score(y_test,predictions))
print("The mean absolute error over the ", cv, "folds is: ", np.mean(errorMetr))
return predictions
<save_to_csv> | test_predictions = np.zeros(x_test.shape[0])
out_of_fold = np.zeros(x_train.shape[0])
valid_scores = []
train_scores = [] | Home Credit Default Risk |
1,457,238 | pd.DataFrame({'Survived': predictions.astype(int)}, index=test.index)\
.reset_index() \
.to_csv(f'submission_stack_rkd.csv', index=False )<save_to_csv> | k_fold = KFold(n_splits = 5, shuffle = False, random_state = 50 ) | Home Credit Default Risk |
1,457,238 | pd.DataFrame({'Survived': predictions.astype(int)}, index=test.index)\
.reset_index() \
.to_csv(f'submission_stack_xkx_tuned.csv', index=False )<prepare_output> | x_train = np.array(x_train)
x_test = np.array(x_test ) | Home Credit Default Risk |
1,457,238 | preds = pd.DataFrame()
preds = x.loc[test.index]
preds['Pclass'] = test['Pclass']
preds['pred'] = predictions_proba[:, 1]
preds.head()<data_type_conversions> | for train_indices, valid_indices in k_fold.split(x_train):
train_features, train_labels = x_train[train_indices], y_train[train_indices]
valid_features, valid_labels = x_train[valid_indices], y_train[valid_indices]
model = lgb.LGBMClassifier(n_estimators=10000, objective = 'binary', boosting_type='goss',class_weight = 'balanced',
learning_rate = 0.05, reg_alpha = 0.1, reg_lambda = 0.1, n_jobs = -1, random_state = 50)
model.fit(train_features, train_labels, eval_metric = 'auc',
eval_set = [(valid_features, valid_labels),(train_features, train_labels)],
eval_names = ['valid', 'train'], early_stopping_rounds = 100, verbose = 200)
best_iteration = model.best_iteration_
test_predictions += model.predict_proba(x_test, num_iteration = best_iteration)[:, 1] / k_fold.n_splits
out_of_fold[valid_indices] = model.predict_proba(valid_features, num_iteration = best_iteration)[:, 1]
valid_score = model.best_score_['valid']['auc']
train_score = model.best_score_['train']['auc']
valid_scores.append(valid_score)
train_scores.append(train_score)
gc.enable()
del model, train_features, valid_features
gc.collect() | Home Credit Default Risk |
1,457,238 | preds.loc[preds['Pclass'] == 3, 'pred'] = preds.loc[preds['Pclass'] == 3, 'pred'] -0.2
sub = []
sub = preds['pred'].values
sub = np.around(sub ).astype(int )<save_to_csv> | valid_auc = roc_auc_score(y_train, out_of_fold)
valid_scores.append(valid_auc)
train_scores.append(np.mean(train_scores))
fold_names = list(range(5))
fold_names.append('overall')
metrics = pd.DataFrame({'fold': fold_names, 'train': train_scores, 'valid': valid_scores} ) | Home Credit Default Risk |
1,457,238 | pd.DataFrame({'Survived': sub}, index=test.index)\
.reset_index() \
.to_csv(f'submission_stack_xkx_m.csv', index=False )<set_options> | submission = pd.DataFrame({'SK_ID_CURR': test_ids, 'TARGET': test_predictions})
submission.to_csv('submission.csv', index = False ) | Home Credit Default Risk |
1,438,465 | plt.style.use('seaborn')
sns.set(font_scale=2.5)
warnings.filterwarnings('ignore')
%matplotlib inline<load_from_csv> | def geometric_mean(x):
return np.exp(np.log(x[x>0] ).mean())
def one_hot_encoder(df, nan_as_category = True):
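    # One-hot encode every object-dtype column with pd.get_dummies; dummy_na adds an explicit
    # missing-value indicator, and the list of newly created dummy columns is returned.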
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns= categorical_columns, dummy_na= nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns
def application_train_test(num_rows = None, nan_as_category = False):
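    # Load and stack application train/test, derive ratio features (credit-to-annuity, income ratios,
    # EXT_SOURCE aggregates), then factorize binary columns and one-hot encode the rest.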
    df = pd.read_csv('../input/application_train.csv', nrows= num_rows)
    test_df = pd.read_csv('../input/application_test.csv', nrows= num_rows)
print("Train samples: {}, test samples: {}".format(len(df), len(test_df)))
df = df.append(test_df ).reset_index()
df = df[df['CODE_GENDER'] != 'XNA']
docs = [_f for _f in df.columns if 'FLAG_DOC' in _f]
live = [_f for _f in df.columns if('FLAG_' in _f)&('FLAG_DOC' not in _f)&('_FLAG_' not in _f)]
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)
inc_by_org = df[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE' ).median() ['AMT_INCOME_TOTAL']
df['NEW_CREDIT_TO_ANNUITY_RATIO'] = df['AMT_CREDIT'] / df['AMT_ANNUITY']
df['NEW_CREDIT_TO_GOODS_RATIO'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE']
df['NEW_DOC_IND_KURT'] = df[docs].kurtosis(axis=1)
df['NEW_LIVE_IND_SUM'] = df[live].sum(axis=1)
df['NEW_INC_PER_CHLD'] = df['AMT_INCOME_TOTAL'] /(1 + df['CNT_CHILDREN'])
df['NEW_INC_BY_ORG'] = df['ORGANIZATION_TYPE'].map(inc_by_org)
df['NEW_EMPLOY_TO_BIRTH_RATIO'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['NEW_EMPLOY_TO_BIRTH-18_RATIO'] = df['DAYS_EMPLOYED'] /(df['DAYS_BIRTH'] + 18*365)
df['NEW_BIRTH_TO_EMPLOY_RATIO'] = df['DAYS_BIRTH'] /(1 + df['DAYS_EMPLOYED'])
df['NEW_INCOME_TO_ANNUITY_RATIO'] = df['AMT_INCOME_TOTAL'] /(1 + df['AMT_ANNUITY'])
df['NEW_ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] /(1 + df['AMT_INCOME_TOTAL'])
df['NEW_EXT_SOURCES_MEDIAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].median(axis=1, skipna=True)
df['NEW_EXT_SOURCES_MEAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1, skipna=True)
df['NEW_EXT_SOURCES_PROD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].prod(axis=1, skipna=True, min_count=1)
df['NEW_EXT_SOURCES_MAX'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].max(axis=1, skipna=True)
df['NEW_EXT_SOURCES_MIN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].min(axis=1, skipna=True)
df['NEW_EXT_SOURCES_STD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1, skipna=True)
df['NEW_EXT_SOURCES_MAD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mad(axis=1, skipna=True)
df['NEW_EXT_SOURCES_GEO'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].apply(geometric_mean, axis=1)
df['NEW_CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']
df['NEW_CAR_TO_EMPLOY_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['NEW_PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
df['NEW_PHONE_TO_EMPLOYED_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
df['NEW_CREDIT_TO_INCOME_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
df['NEW_PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']
df['NEW_INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']
df['NEW_INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
df[bin_feature], uniques = pd.factorize(df[bin_feature])
df, cat_cols = one_hot_encoder(df, nan_as_category)
dropcolum=['FLAG_DOCUMENT_2','FLAG_DOCUMENT_4',
'FLAG_DOCUMENT_5','FLAG_DOCUMENT_6','FLAG_DOCUMENT_7',
'FLAG_DOCUMENT_8','FLAG_DOCUMENT_9','FLAG_DOCUMENT_10',
'FLAG_DOCUMENT_11','FLAG_DOCUMENT_12','FLAG_DOCUMENT_13',
'FLAG_DOCUMENT_14','FLAG_DOCUMENT_15','FLAG_DOCUMENT_16',
'FLAG_DOCUMENT_17','FLAG_DOCUMENT_18','FLAG_DOCUMENT_19',
'FLAG_DOCUMENT_20','FLAG_DOCUMENT_21']
df= df.drop(dropcolum,axis=1)
del test_df
gc.collect()
return df
def bureau_and_balance(num_rows = None, nan_as_category = True):
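    # Roll bureau_balance up to SK_ID_BUREAU, join onto bureau, then aggregate to SK_ID_CURR
    # with separate statistics for active and closed credits.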
    bureau = pd.read_csv('../input/bureau.csv', nrows = num_rows)
    bb = pd.read_csv('../input/bureau_balance.csv', nrows = num_rows)
bb, bb_cat = one_hot_encoder(bb, nan_as_category)
bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
for col in bb_cat:
bb_aggregations[col] = ['mean']
bb_agg = bb.groupby('SK_ID_BUREAU' ).agg(bb_aggregations)
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist() ])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(['SK_ID_BUREAU'], axis=1, inplace= True)
del bb, bb_agg
gc.collect()
num_aggregations = {
'DAYS_CREDIT': [ 'mean', 'var'],
'DAYS_CREDIT_ENDDATE': [ 'mean'],
'DAYS_CREDIT_UPDATE': ['mean'],
'CREDIT_DAY_OVERDUE': ['mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'AMT_CREDIT_SUM': [ 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': [ 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'AMT_ANNUITY': ['max', 'mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum']
}
cat_aggregations = {}
for cat in bureau_cat: cat_aggregations[cat] = ['mean']
for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist() ])
active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR' ).agg(num_aggregations)
active_agg.columns = pd.Index(['ACTIVE_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
del active, active_agg
gc.collect()
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR' ).agg(num_aggregations)
closed_agg.columns = pd.Index(['CLOSED_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
del closed, closed_agg, bureau
gc.collect()
return bureau_agg
def previous_applications(num_rows = None, nan_as_category = True):
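    # Replace the 365243-day placeholder with NaN, then aggregate numeric and one-hot columns to
    # SK_ID_CURR, with separate statistics for approved and refused applications.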
    prev = pd.read_csv('../input/previous_application.csv', nrows = num_rows)
prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
num_aggregations = {
'AMT_ANNUITY': [ 'max', 'mean'],
'AMT_APPLICATION': [ 'max','mean'],
'AMT_CREDIT': [ 'max', 'mean'],
'APP_CREDIT_PERC': [ 'max', 'mean'],
'AMT_DOWN_PAYMENT': [ 'max', 'mean'],
'AMT_GOODS_PRICE': [ 'max', 'mean'],
'HOUR_APPR_PROCESS_START': [ 'max', 'mean'],
'RATE_DOWN_PAYMENT': [ 'max', 'mean'],
'DAYS_DECISION': [ 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
}
cat_aggregations = {}
for cat in cat_cols:
cat_aggregations[cat] = ['mean']
prev_agg = prev.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist() ])
approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
approved_agg = approved.groupby('SK_ID_CURR' ).agg(num_aggregations)
approved_agg.columns = pd.Index(['APPROVED_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist() ])
prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')
refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
refused_agg = refused.groupby('SK_ID_CURR' ).agg(num_aggregations)
refused_agg.columns = pd.Index(['REFUSED_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist() ])
prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')
del refused, refused_agg, approved, approved_agg, prev
gc.collect()
return prev_agg
def pos_cash(num_rows = None, nan_as_category = True):
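    # Aggregate the monthly POS/cash balance history to SK_ID_CURR and add a count of balance records per client.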
    pos = pd.read_csv('../input/POS_CASH_balance.csv', nrows = num_rows)
pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)
aggregations = {
'MONTHS_BALANCE': ['max', 'mean', 'size'],
'SK_DPD': ['max', 'mean'],
'SK_DPD_DEF': ['max', 'mean']
}
for cat in cat_cols:
aggregations[cat] = ['mean']
pos_agg = pos.groupby('SK_ID_CURR' ).agg(aggregations)
pos_agg.columns = pd.Index(['POS_' + e[0] + "_" + e[1].upper() for e in pos_agg.columns.tolist() ])
pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR' ).size()
del pos
gc.collect()
return pos_agg
def installments_payments(num_rows = None, nan_as_category = True):
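    # Build days-past-due (DPD), days-before-due (DBD) and payment-gap features per installment, then aggregate to SK_ID_CURR.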
    ins = pd.read_csv('../input/installments_payments.csv', nrows = num_rows)
ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)
ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)
ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)
aggregations = {
'NUM_INSTALMENT_VERSION': ['nunique'],
'DPD': ['max', 'mean', 'sum'],
'DBD': ['max', 'mean', 'sum'],
'PAYMENT_PERC': [ 'mean', 'var'],
'PAYMENT_DIFF': [ 'mean', 'var'],
'AMT_INSTALMENT': ['max', 'mean', 'sum'],
'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']
}
for cat in cat_cols:
aggregations[cat] = ['mean']
ins_agg = ins.groupby('SK_ID_CURR' ).agg(aggregations)
ins_agg.columns = pd.Index(['INSTAL_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist() ])
ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR' ).size()
del ins
gc.collect()
return ins_agg
def credit_card_balance(num_rows = None, nan_as_category = True):
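    # Aggregate every credit-card balance column to SK_ID_CURR with max/mean/sum/var plus a record count.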
    cc = pd.read_csv('../input/credit_card_balance.csv', nrows = num_rows)
cc, cat_cols = one_hot_encoder(cc, nan_as_category= True)
cc.drop(['SK_ID_PREV'], axis= 1, inplace = True)
cc_agg = cc.groupby('SK_ID_CURR' ).agg([ 'max', 'mean', 'sum', 'var'])
cc_agg.columns = pd.Index(['CC_' + e[0] + "_" + e[1].upper() for e in cc_agg.columns.tolist() ])
cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR' ).size()
del cc
gc.collect()
return cc_agg
| Home Credit Default Risk |
1,438,465 | df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train[['Ticket', 'Survived']].groupby(['Survived'], as_index=True ).count()
df_train[['Ticket', 'Pclass']].groupby(['Pclass'], as_index=True ).count()<count_missing_values> | def kfold_lightgbm(train_df, train_target, test_df, num_folds, stratified=False, debug=False):
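    # Out-of-fold LightGBM: accumulate OOF predictions on train, average fold predictions on test,
    # and record both gain-based and SHAP-based feature importances per fold.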
print("Starting LightGBM.Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
if stratified:
folds = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=47)
else:
folds = KFold(n_splits= num_folds, shuffle=True, random_state=47)
oof_preds = np.zeros(train_df.shape[0])
sub_preds = np.zeros(test_df.shape[0])
feat_importance = pd.DataFrame()
scores = []
models = []
    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df, train_target)):
train_x, train_y = train_df.iloc[train_idx], train_target.iloc[train_idx]
valid_x, valid_y = train_df.iloc[valid_idx], train_target.iloc[valid_idx]
clf = LGBMClassifier(
nthread=4,
n_estimators=10000,
learning_rate=0.02,
num_leaves=32,
colsample_bytree=0.9497036,
subsample=0.8715623,
max_depth=8,
reg_alpha=0.04,
reg_lambda=0.073,
min_split_gain=0.0222415,
min_child_weight=40,
silent=-1,
verbose=-1,
)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)],
eval_metric= 'auc', verbose= 1000, early_stopping_rounds= 300)
oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]
sub_preds += clf.predict_proba(test_df, num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = test_df.columns.values
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["shap_values"] = abs(shap.TreeExplainer(clf ).shap_values(valid_x)[:,:test_df.shape[1]] ).mean(axis=0 ).T
fold_importance_df["fold"] = n_fold + 1
feat_importance = pd.concat([feat_importance, fold_importance_df], axis=0)
scores.append(roc_auc_score(valid_y, oof_preds[valid_idx]))
print('Fold %2d AUC : %.6f' %(n_fold + 1, scores[n_fold]))
models.append(clf)
del clf, train_x, train_y, valid_x, valid_y, fold_importance_df
gc.collect()
score = roc_auc_score(train_target, oof_preds)
print('Full AUC score %.6f' % score)
print('Mean AUC score %.6f' % np.mean(scores))
if not debug:
pd.DataFrame(oof_preds ).to_csv("lgb{:03}_{:.5f}_train_oof.csv".format(test_df.shape[1], score), index=False)
        sub_df = pd.read_csv('../input/sample_submission.csv')
sub_df['TARGET'] = sub_preds
sub_df.to_csv("lgb{:03}_{:.5f}.csv".format(test_df.shape[1], score), index= False)
display_shapley_values(feat_importance)
return feat_importance, models, scores
def display_importances(feat_importance):
best_features = feat_importance[["feature", "importance"]].groupby("feature")["importance"].agg(['mean', 'std'])\
.sort_values(by="mean", ascending=False ).head(40 ).reset_index()
best_features.columns = ["feature", "mean importance", "err"]
plt.figure(figsize=(8, 10))
sns.barplot(x="mean importance", y="feature", xerr=best_features['err'], data=best_features)
plt.title('LightGBM Features(avg over folds)')
plt.tight_layout()
plt.show()
def display_shapley_values(feat_importance):
best_features = feat_importance[["feature", "shap_values"]].groupby("feature")["shap_values"].agg(['mean', 'std'])\
.sort_values(by="mean", ascending=False ).head(40 ).reset_index()
best_features.columns = ["feature", "mean shapley values", "err"]
plt.figure(figsize=(8, 10))
sns.barplot(x="mean shapley values", y="feature", xerr=best_features['err'], data=best_features)
plt.title('LightGBM shapley values(avg over folds)')
plt.tight_layout()
plt.show()
| Home Credit Default Risk |
1,438,465 | for col in df_train.columns:
    msg = 'column: {:>10}\t Percent of NaN value: {:.2f}%'.format(col, 100 * (df_train[col].isnull().sum() / df_train[col].shape[0]))
    print(msg)<count_missing_values> | %%time
debug = False
num_rows = 10000 if debug else None
scores = {}
df = application_train_test(num_rows)
with timer("Process bureau and bureau_balance"):
bureau = bureau_and_balance(num_rows)
print("Bureau df shape:", bureau.shape)
df = df.join(bureau, how='left', on='SK_ID_CURR')
del bureau
gc.collect()
with timer("Process previous_applications"):
prev = previous_applications(num_rows)
print("Previous applications df shape:", prev.shape)
df = df.join(prev, how='left', on='SK_ID_CURR')
del prev
gc.collect()
with timer("Process POS-CASH balance"):
pos = pos_cash(num_rows)
print("Pos-cash balance df shape:", pos.shape)
df = df.join(pos, how='left', on='SK_ID_CURR')
del pos
gc.collect()
with timer("Process installments payments"):
ins = installments_payments(num_rows)
print("Installments payments df shape:", ins.shape)
df = df.join(ins, how='left', on='SK_ID_CURR')
del ins
gc.collect()
with timer("Process credit card balance"):
cc = credit_card_balance(num_rows)
print("Credit card balance df shape:", cc.shape)
df = df.join(cc, how='left', on='SK_ID_CURR')
del cc
gc.collect()
with timer("Save df"):
df.to_csv('merged_df.csv.gz', compression='gzip', index=False)
with timer("Divide in training and test data"):
feats = [f for f in df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]
train_df = df[df['TARGET'].notnull() ][feats]
train_target = df[df['TARGET'].notnull() ]['TARGET']
test_df = df[df['TARGET'].isnull() ][feats]
del df
gc.collect() | Home Credit Default Risk |
1,438,465 | for col in df_test.columns:
    msg = 'column: {:>10}\t Percent of NaN value: {:.2f}%'.format(col, 100 * (df_test[col].isnull().sum() / df_test[col].shape[0]))
    print(msg)<count_missing_values> | %%time
feat_importance, models, scores = kfold_lightgbm(train_df, train_target, test_df, num_folds=5, stratified=False, debug=debug ) | Home Credit Default Risk |
1,438,465 | for col in df_test.columns:
    msg = 'column: {:>10}\t Percent of NaN value: {:.2f}%'.format(col, 100 * (df_test[col].isnull().sum() / df_test[col].shape[0]))
    print(msg)<feature_engineering> | def inv_logit(p): return np.exp(p)/(1 + np.exp(p))
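# SHAP values are additive in log-odds space: the base value plus the per-feature contributions
# equals the model's raw output, which inv_logit maps back to a probability.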
base_value = shap_values[0,-1]
output = base_value + np.sum(shap_values[0,:-1])
print('Log-odds output:', output, ' Logistic output:', inv_logit(output)) | Home Credit Default Risk |
1,438,465 | <feature_engineering><EOS> | percentile = 0.15
best_features = feat_importance[["feature", "shap_values"]].groupby("feature")["shap_values"].agg(['mean'])\
.sort_values(by="mean", ascending=False ).reset_index()
best_features = best_features[:int(best_features.shape[0]*percentile)]["feature"].values
print("
with timer("Run LightGBM with kfold"):
train_df = train_df[best_features]
test_df = test_df[best_features]
feat_importance, models, scores = kfold_lightgbm(train_df, train_target, test_df, num_folds=5, stratified=False, debug=debug ) | Home Credit Default Risk |
1,289,226 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<categorify> | plt.rcParams['font.size'] = 18
plt.style.use('fivethirtyeight')
%matplotlib inline | Home Credit Default Risk |
1,289,226 | df_train['Initial'].replace(['Mlle','Mme','Ms','Dr','Major','Lady','Countess','Jonkheer','Col','Rev','Capt','Sir','Don', 'Dona'],
['Miss','Miss','Miss','Mr','Mr','Mrs','Mrs','Other','Other','Other','Mr','Mr','Mr', 'Mr'],inplace=True)
df_test['Initial'].replace(['Mlle','Mme','Ms','Dr','Major','Lady','Countess','Jonkheer','Col','Rev','Capt','Sir','Don', 'Dona'],
['Miss','Miss','Miss','Mr','Mr','Mrs','Mrs','Other','Other','Other','Mr','Mr','Mr', 'Mr'],inplace=True)<feature_engineering> | random = pd.read_csv('../input/home-credit-model-tuning/random_search_simple.csv').sort_values('score', ascending = False).reset_index()
opt = pd.read_csv('../input/home-credit-model-tuning/bayesian_trials_simple.csv').sort_values('score', ascending = False).reset_index()
print('Best score from random search: {:.5f} found on iteration: {}.'.format(random.loc[0, 'score'], random.loc[0, 'iteration']))
print('Best score from bayesian optimization: {:.5f} found on iteration: {}.'.format(opt.loc[0, 'score'], opt.loc[0, 'iteration'])) | Home Credit Default Risk |
1,289,226 | df_train.loc[(df_train.Age.isnull())&(df_train.Initial=='Mr'),'Age'] = 33
df_train.loc[(df_train.Age.isnull())&(df_train.Initial=='Mrs'),'Age'] = 36
df_train.loc[(df_train.Age.isnull())&(df_train.Initial=='Master'),'Age'] = 5
df_train.loc[(df_train.Age.isnull())&(df_train.Initial=='Miss'),'Age'] = 22
df_train.loc[(df_train.Age.isnull())&(df_train.Initial=='Other'),'Age'] = 46
df_test.loc[(df_test.Age.isnull())&(df_test.Initial=='Mr'),'Age'] = 33
df_test.loc[(df_test.Age.isnull())&(df_test.Initial=='Mrs'),'Age'] = 36
df_test.loc[(df_test.Age.isnull())&(df_test.Initial=='Master'),'Age'] = 5
df_test.loc[(df_test.Age.isnull())&(df_test.Initial=='Miss'),'Age'] = 22
df_test.loc[(df_test.Age.isnull())&(df_test.Initial=='Other'),'Age'] = 46<data_type_conversions> | keys = []
for key, value in ast.literal_eval(random.loc[0, 'hyperparameters']).items():
    print(f'{key}: {value}')
    keys.append(key) | Home Credit Default Risk |
1,289,226 | df_train['Embarked'].fillna('S', inplace=True )<feature_engineering> | for key in keys:
    print('{}: {}'.format(key, ast.literal_eval(opt.loc[0, 'hyperparameters'])[key])) | Home Credit Default Risk |
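The hyperparameters column is stored as the string form of a Python dict, which is why both cells above parse it with ast.literal_eval (a safe, literals-only parser, unlike eval) before indexing. A standalone check with a made-up row:

import ast

row = "{'boosting_type': 'gbdt', 'num_leaves': 44, 'learning_rate': 0.0126}"
params = ast.literal_eval(row)  # parses Python literals only; rejects arbitrary code
print(type(params), params['num_leaves'])  # <class 'dict'> 44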
1,289,226 | df_train['Age_cat'] = 0
df_train.loc[df_train['Age'] < 20, 'Age_cat'] = 0
df_train.loc[(20 <= df_train['Age'])&(df_train['Age'] < 26), 'Age_cat'] = 1
df_train.loc[(26 <= df_train['Age'])&(df_train['Age'] < 33), 'Age_cat'] = 2
df_train.loc[(33 <= df_train['Age'])&(df_train['Age'] < 39), 'Age_cat'] = 3
df_train.loc[(39 <= df_train['Age']), 'Age_cat'] = 4
df_test['Age_cat'] = 0
df_test.loc[df_test['Age'] < 20, 'Age_cat'] = 0
df_test.loc[(20 <= df_test['Age'])&(df_test['Age'] < 26), 'Age_cat'] = 1
df_test.loc[(26 <= df_test['Age'])&(df_test['Age'] < 33), 'Age_cat'] = 2
df_test.loc[(33 <= df_test['Age'])&(df_test['Age'] < 39), 'Age_cat'] = 3
df_test.loc[(39 <= df_test['Age']), 'Age_cat'] = 4<feature_engineering> | random['set'] = 'random'
scores = random[['score', 'iteration', 'set']]
opt['set'] = 'opt'
scores = scores.append(opt[['set', 'iteration', 'score']], sort = True)
scores.head() | Home Credit Default Risk |
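The chained .loc age binning in the prompt column of this row could equivalently be written with pd.cut; a minimal sketch on a toy frame, using the same bin edges:

import numpy as np
import pandas as pd

df = pd.DataFrame({'Age': [5, 22, 30, 36, 60]})
# right=False gives [low, high) intervals, matching the original < / <= comparisons.
df['Age_cat'] = pd.cut(df['Age'],
                       bins=[-np.inf, 20, 26, 33, 39, np.inf],
                       right=False, labels=[0, 1, 2, 3, 4]).astype(int)
print(df)  # ages 5, 22, 30, 36, 60 map to categories 0, 1, 2, 3, 4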
1,289,226 | <count_values> | scores.groupby('set')['score'].agg(['mean', 'max', 'min', 'std', 'count']) | Home Credit Default Risk |
1,289,226 | print(df_train['Age_cat'].value_counts() )<categorify> | random_fit = np.polyfit(random['iteration'], random['score'], 1)
print('Random search slope: {:.8f}'.format(random_fit[0])) | Home Credit Default Risk |
1,289,226 | df_train['Initial'] = df_train['Initial'].map({'Master': 0, 'Miss': 1, 'Mr': 2, 'Mrs': 3, 'Other': 4})
df_test['Initial'] = df_test['Initial'].map({'Master': 0, 'Miss': 1, 'Mr': 2, 'Mrs': 3, 'Other': 4} )<categorify> | opt_fit = np.polyfit(opt['iteration'], opt['score'], 1)
print('Bayesian optimization slope: {:.8f}'.format(opt_fit[0])) | Home Credit Default Risk |
1,289,226 | df_train['Embarked'] = df_train['Embarked'].map({'C': 0, 'Q': 1, 'S': 2})
df_test['Embarked'] = df_test['Embarked'].map({'C': 0, 'Q': 1, 'S': 2})
<categorify> | opt_fit[0] / random_fit[0] | Home Credit Default Risk |
1,289,226 | df_train['Sex'] = df_train['Sex'].map({'female': 0, 'male': 1})
df_test['Sex'] = df_test['Sex'].map({'female': 0, 'male': 1})
<categorify> | print('After 10,000 iterations, the random score is: {:.5f}.'.format(
random_fit[0] * 1e5 + random_fit[1])) | Home Credit Default Risk |
1,289,226 | df_train = pd.get_dummies(df_train, columns=['Age_cat'], prefix='Age_cat')
df_test = pd.get_dummies(df_test, columns=['Age_cat'], prefix='Age_cat' )<categorify> | print('After 10,000 iterations, the bayesian score is: {:.5f}.'.format(
opt_fit[0] * 1e5 + opt_fit[1])) | Home Credit Default Risk |
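Both fits are degree-1 polynomials, so the same extrapolation can be written with np.polyval, which evaluates fit[0] * x + fit[1] for a linear fit. Keep in mind that extrapolating a linear trend far beyond the observed iterations is optimistic, since tuning gains normally flatten out. A sketch with made-up coefficients:

import numpy as np

fit = np.array([2e-7, 0.785])  # hypothetical [slope, intercept] from np.polyfit
for n in (1e4, 1e5):
    # np.polyval(fit, n) == fit[0] * n + fit[1] for a degree-1 fit
    print('After {:,.0f} iterations: {:.5f}'.format(n, np.polyval(fit, n)))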
1,289,226 | df_train = pd.get_dummies(df_train, columns=['Initial'], prefix='Initial')
df_test = pd.get_dummies(df_test, columns=['Initial'], prefix='Initial')<categorify> | def process(results):
    results = results.copy()
    results['hyperparameters'] = results['hyperparameters'].map(ast.literal_eval)
    results = results.sort_values('score', ascending=False).reset_index(drop=True)
    hyp_df = pd.DataFrame(columns=list(results.loc[0, 'hyperparameters'].keys()))
    for hyp in results['hyperparameters']:
        hyp_df = hyp_df.append(pd.DataFrame(hyp, index=[0]),
                               ignore_index=True, sort=True)
    hyp_df['iteration'] = results['iteration']
    hyp_df['score'] = results['score']
    return hyp_df | Home Credit Default Risk |
1,289,226 | df_train = pd.get_dummies(df_train, columns=['Embarked'], prefix='Embarked')
df_test = pd.get_dummies(df_test, columns=['Embarked'], prefix='Embarked' )<drop_column> | random_hyp = process(random)
opt_hyp = process(opt)
random_hyp.head() | Home Credit Default Risk |
1,289,226 | df_train.drop(['PassengerId', 'Name', 'SibSp', 'Parch', 'Ticket', 'Cabin'], axis=1, inplace=True)
df_test.drop(['PassengerId', 'Name', 'SibSp', 'Parch', 'Ticket', 'Cabin'], axis=1, inplace=True)
<import_modules> | param_grid = {
'is_unbalance': [True, False],
'boosting_type': ['gbdt', 'goss', 'dart'],
'num_leaves': list(range(20, 150)) ,
'learning_rate': list(np.logspace(np.log10(0.005), np.log10(0.5), base = 10, num = 1000)) ,
'subsample_for_bin': list(range(20000, 300000, 20000)) ,
'min_child_samples': list(range(20, 500, 5)) ,
'reg_alpha': list(np.linspace(0, 1)) ,
'reg_lambda': list(np.linspace(0, 1)) ,
'colsample_bytree': list(np.linspace(0.6, 1, 10)) ,
'subsample': list(np.linspace(0.5, 1, 100))
} | Home Credit Default Risk |
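Random search draws each hyperparameter independently and uniformly from the lists above. A minimal sketch of a single draw (the grid is abbreviated here so the snippet is self-contained):

import random as rnd
import numpy as np

rnd.seed(0)
param_grid = {
    'boosting_type': ['gbdt', 'goss', 'dart'],
    'num_leaves': list(range(20, 150)),
    'learning_rate': list(np.logspace(np.log10(0.005), np.log10(0.5), base=10, num=1000)),
}
# One random-search iteration: an independent uniform choice per hyperparameter.
params = {key: rnd.choice(values) for key, values in param_grid.items()}
print(params)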
1,289,226 | from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn import metrics
from sklearn.model_selection import train_test_split<prepare_x_and_y> | best_random_hyp = random_hyp.loc[0, :]
best_opt_hyp = opt_hyp.loc[0, :] | Home Credit Default Risk |
1,289,226 | X_train = df_train.drop('Survived', axis=1 ).values
target_label = df_train['Survived'].values
X_test = df_test.values<split> | random_hyp.groupby('boosting_type')['score'].agg(['mean', 'max', 'min', 'std', 'count'] ) | Home Credit Default Risk |
1,289,226 | X_tr, X_vld, y_tr, y_vld = train_test_split(X_train, target_label, test_size=0.34, random_state=900 )<import_modules> | opt_hyp.groupby('boosting_type')['score'].agg(['mean', 'max', 'min', 'std', 'count'] ) | Home Credit Default Risk |
1,289,226 | from sklearn.metrics import accuracy_score<train_model> | random_hyp.groupby('is_unbalance')['score'].agg(['mean', 'max', 'min', 'std', 'count'] ) | Home Credit Default Risk |
1,289,226 | model = GradientBoostingClassifier()
model.fit(X_tr,y_tr)
pred = model.predict(X_vld )<compute_test_metric> | opt_hyp.groupby('is_unbalance')['score'].agg(['mean', 'max', 'min', 'std', 'count'] ) | Home Credit Default Risk |
1,289,226 | print('Predicted survival of {} passengers with {:.2f}% accuracy'.format(y_vld.shape[0], 100 * metrics.accuracy_score(pred, y_vld)))<load_from_csv> | random_hyp['set'] = 'Random Search'
opt_hyp['set'] = 'Bayesian'
hyp = random_hyp.append(opt_hyp, ignore_index = True, sort = True)
hyp.head() | Home Credit Default Risk |
1,289,226 | submission = pd.read_csv('.. /input/sample_submission.csv' )<predict_on_test> | plt.rcParams['axes.labelpad'] = 12 | Home Credit Default Risk |
1,289,226 | prediction = model.predict(X_test)
submission['Survived'] = prediction<save_to_csv> | random_hyp['n_estimators'] = random_hyp['n_estimators'].astype(np.int32)
random_hyp.corr() ['score'] | Home Credit Default Risk |
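corr()['score'] gives the linear correlation of each numeric hyperparameter with the validation score; sorting by absolute value surfaces the most influential ones. A toy illustration (numbers invented):

import pandas as pd

toy = pd.DataFrame({'learning_rate': [0.01, 0.05, 0.2, 0.4],
                    'num_leaves': [31, 60, 90, 120],
                    'score': [0.78, 0.79, 0.77, 0.74]})
corr = toy.corr()['score'].drop('score')
print(corr.reindex(corr.abs().sort_values(ascending=False).index))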