kernel_id (int64, 24.2k-23.3M) | prompt (string, 8-1.85M chars) | completion (string, 1-182k chars) | comp_name (string, 5-57 chars)
|---|---|---|---|
1,136,016 | sn_mse = SigmoidNeuron()
sn_mse.fit(X_scaled_train, Y_train, epochs=100, learning_rate=0.015, loss_fn="mse", display_loss=True)<train_model> | gc.enable()
del POS_CASH_balance
del pos_cash_mean
gc.collect() | Home Credit Default Risk |
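The `SigmoidNeuron` class these cells call is never defined in the rows shown. A minimal sketch of the interface the calls assume (`fit` with `epochs`, `learning_rate`, a `loss_fn` of `"mse"` or `"ce"`, `display_loss`, plus `predict`) might look like the following; the gradient updates are the standard ones for a single sigmoid unit, and none of this is the kernel author's actual code:

```python
import numpy as np

class SigmoidNeuron:
    """Hypothetical single sigmoid unit trained by batch gradient descent."""

    def sigmoid(self, x):
        return 1.0 / (1.0 + np.exp(-x))

    def predict(self, X):
        return self.sigmoid(np.dot(np.asarray(X), self.w) + self.b)

    def fit(self, X, Y, epochs=1, learning_rate=1.0, loss_fn="mse", display_loss=False):
        X, Y = np.asarray(X), np.asarray(Y).ravel()
        self.w, self.b = np.zeros(X.shape[1]), 0.0
        for epoch in range(epochs):
            y_pred = self.predict(X)
            if loss_fn == "mse":
                grad_z = (y_pred - Y) * y_pred * (1 - y_pred)  # dL/dz for squared error
            else:  # "ce": the sigmoid derivative cancels for cross-entropy
                grad_z = y_pred - Y
            self.w -= learning_rate * X.T.dot(grad_z) / len(Y)
            self.b -= learning_rate * grad_z.mean()
            if display_loss:
                loss = (np.mean((y_pred - Y) ** 2) if loss_fn == "mse"
                        else -np.mean(Y * np.log(y_pred + 1e-12)
                                      + (1 - Y) * np.log(1 - y_pred + 1e-12)))
                print(f"epoch {epoch}: {loss_fn} loss = {loss:.6f}")
```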
1,136,016 | sn_ce = SigmoidNeuron()
sn_ce.fit(X_scaled_train, Y_train, epochs=10000, learning_rate=0.000005, loss_fn="ce", display_loss=True)<predict_on_test> | bureau = pd.read_csv('../input/bureau.csv')
bureau.head() | Home Credit Default Risk |
1,136,016 | def print_accuracy(sn):
    Y_pred_train = sn.predict(X_scaled_train)
    Y_pred_binarised_train = (Y_pred_train >= 0.5).astype("int").ravel()
    accuracy_train = accuracy_score(Y_pred_binarised_train, Y_train)
    print("Train Accuracy : ", accuracy_train)
    print("-"*50)<compute_test_metric> | bureau_mean = bureau.groupby('SK_ID_CURR').mean()
bureau_mean['buro_count'] = bureau[['SK_ID_CURR', 'SK_ID_BUREAU']].groupby('SK_ID_CURR').count()['SK_ID_BUREAU']
bureau_mean.columns = ['b_' + col for col in bureau_mean.columns]
X = X.merge(right=bureau_mean.reset_index(), how='left', on='SK_ID_CURR')
X.shape | Home Credit Default Risk |
1,136,016 | print_accuracy(sn_ce)<create_dataframe> | gc.enable()
del bureau
del bureau_mean
gc.collect()
| Home Credit Default Risk |
1,136,016 |
<save_to_csv> | previous_application = pd.read_csv('../input/previous_application.csv')
previous_application.head() | Home Credit Default Risk |
1,136,016 | Y_pred_test = sn_ce.predict(X_scaled_test)
Y_pred_binarised_test = (Y_pred_test >= 0.5).astype("int").ravel()
submission = {}
submission['ImageId'] = ID_test
submission['Class'] = Y_pred_binarised_test
submission = pd.DataFrame(submission)
submission = submission[['ImageId', 'Class']]
submission = submission.sort_values(['ImageId'])
submission.to_csv("submisision.csv", index=False )<load_from_csv> | previous_app_mean = previous_application.groupby('SK_ID_CURR' ).mean()
previous_app_mean['SK_ID_PREV'] = previous_application[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR' ).count() ['SK_ID_PREV']
previous_app_mean.columns = ['pa_' + col for col in previous_app_mean.columns]
previous_app_mean.head() | Home Credit Default Risk |
1,136,016 | train = pd.read_csv('../input/csm6420-workshop/train.csv')
test = pd.read_csv('../input/csm6420-workshop/test.csv', index_col=0)
print(train.head())
print(test.head())
<prepare_x_and_y> | X = X.merge(right=previous_app_mean.reset_index(), how='left', on='SK_ID_CURR')
X.shape | Home Credit Default Risk |
1,136,016 | X = train.drop('Class', axis=1)
y = train['Class']
print(y)<find_best_model_class> | gc.enable()
del previous_app_mean
del previous_application
gc.collect() | Home Credit Default Risk |
1,136,016 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
model = GaussianNB()
y_pred = model.fit(X_train, y_train).predict(X_test)
print("Number of mislabeled points out of a total %d points : %d" % (X_test.shape[0], (y_test != y_pred).sum()))<load_from_csv> | installments_payments = pd.read_csv('../input/installments_payments.csv')
installments_payments.head() | Home Credit Default Risk |
1,136,016 | sample = pd.read_csv('../input/csm6420-workshop/sampleSubmission.csv')
print(sample.head())<save_to_csv> | install_pay_mean = installments_payments.groupby('SK_ID_CURR').mean()
install_pay_mean['SK_ID_PREV'] = installments_payments[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()['SK_ID_PREV']
install_pay_mean.columns = ['ip_' + col for col in install_pay_mean.columns]
X = X.merge(right=install_pay_mean.reset_index(), how='left', on='SK_ID_CURR') | Home Credit Default Risk |
1,136,016 | y_pred = model.predict(test.values)
results = pd.DataFrame()
results["TestId"] = test.index.values
results["PredictedScore"]= y_pred
results.to_csv("submission.csv", index=False )<import_modules> | gc.enable()
del installments_payments
del install_pay_mean
gc.collect() | Home Credit Default Risk |
1,136,016 | from fastai.vision import *<load_from_csv> | X_train, X_test, y_train, y_test = train_test_split(df_train, y, test_size=0.2, random_state=123) | Home Credit Default Risk |
1,136,016 | path = Path(".. /input")
train_path = path/"train/train"
test_path = path/"test/test"
sub_df = pd.read_csv(f"{path}/sample_submission.csv")
test_df = pd.read_csv(f"{path}/sample_submission.csv" )<feature_engineering> | lgb_train = lgb.Dataset(data=X_train, label=y_train)
lgb_test = lgb.Dataset(data=X_test, label=y_test ) | Home Credit Default Risk |
1,136,016 | train = get_image_files(train_path)
test = get_image_files(test_path )<load_pretrained> | params = {'task': 'train', 'boosting_type': 'gbdt', 'objective': 'binary', 'metric': 'auc',
'learning_rate': 0.01, 'num_leaves': 48, 'num_iteration': 5000, 'verbose': 0 ,
'colsample_bytree':.8, 'subsample':.9, 'max_depth':7, 'reg_alpha':.1, 'reg_lambda':.1,
'min_split_gain':.01, 'min_child_weight':1}
model = lgb.train(params, lgb_train, valid_sets=lgb_test, early_stopping_rounds=150, verbose_eval=200 ) | Home Credit Default Risk |
1,136,016 | data = ImageDataBunch.from_folder(train_path, valid_pct=0.2, test=test_path, ds_tfms=get_transforms(), size=224).normalize()
data.add_test(ImageList.from_df(test_df, path, folder="test/test"))<define_variables> | lgb_preds = model.predict(df_test) | Home Credit Default Risk |
1,136,016 | data.show_batch()<choose_model_class> | df_test['TARGET'] = lgb_preds | Home Credit Default Risk |
1,136,016 | <find_best_params><EOS> | lgb_pred = df_test[['SK_ID_CURR', 'TARGET']].to_csv('LGB_prediction2.csv', index=False) | Home Credit Default Risk |
1,304,215 | <define_search_space><EOS> | import pandas as pd
import numpy as np | Home Credit Default Risk |
1,304,215 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<train_model> | import pandas as pd
import numpy as np | Home Credit Default Risk |
1,304,215 | learn.fit_one_cycle(5, slice(lr))<train_model> | def return_size(df):
    return round(sys.getsizeof(df) / 1e9, 2)
def convert_types(df):
    print(f'Original size of data: {return_size(df)} gb.')
    for c in df:
        if df[c].dtype == 'object':
            df[c] = df[c].astype('category')
    print(f'New size of data: {return_size(df)} gb.')
    return df | Home Credit Default Risk |
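The `return_size` helper depends on `sys.getsizeof`, and the neighbouring cells call `gc.enable()`/`gc.collect()`; neither module is imported in the rows shown, so the cells presumably assume something like:

```python
import sys  # sys.getsizeof, used by return_size
import gc   # gc.enable()/gc.collect() in the cleanup cells
```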
1,304,215 | learn.fit_one_cycle(5, slice(lr))<train_model> | app_train = pd.read_csv('../input/application_train.csv').replace({365243: np.nan})
app_test = pd.read_csv('../input/application_test.csv').replace({365243: np.nan})
bureau = pd.read_csv('../input/bureau.csv').replace({365243: np.nan})
bureau_balance = pd.read_csv('../input/bureau_balance.csv').replace({365243: np.nan})
app_test['TARGET'] = np.nan
app = app_train.append(app_test, ignore_index = True, sort = True)
app = convert_types(app)
bureau = convert_types(bureau)
bureau_balance = convert_types(bureau_balance)
gc.enable()
del app_train, app_test
gc.collect() | Home Credit Default Risk |
1,304,215 | learn.fit_one_cycle(5, slice(lr))<save_model> | def agg_numeric(df, parent_var, df_name):
    for col in df:
        if col != parent_var and 'SK_ID' in col:
            df = df.drop(columns = col)
    parent_ids = df[parent_var].copy()
    numeric_df = df.select_dtypes('number').copy()
    numeric_df[parent_var] = parent_ids
    agg = numeric_df.groupby(parent_var).agg(['count', 'mean', 'max', 'min', 'sum'])
    columns = []
    for var in agg.columns.levels[0]:
        if var != parent_var:
            for stat in agg.columns.levels[1]:
                columns.append('%s_%s_%s' % (df_name, var, stat))
    agg.columns = columns
    _, idx = np.unique(agg, axis = 1, return_index = True)
    agg = agg.iloc[:, idx]
    return agg | Home Credit Default Risk |
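For reference, a hypothetical call to the helper above would roll `bureau` up to one row per client, with columns following the `df_name_variable_stat` pattern built in the loop:

```python
# Assumed example call, one row per SK_ID_CURR; yields columns such as
# 'BUREAU_AMT_CREDIT_SUM_mean' and 'BUREAU_DAYS_CREDIT_count'.
bureau_agg = agg_numeric(bureau, parent_var='SK_ID_CURR', df_name='BUREAU')
```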
1,304,215 | learn.save("model-1" )<define_search_space> | def agg_categorical(df, parent_var, df_name):
categorical = pd.get_dummies(df.select_dtypes('category'))
categorical[parent_var] = df[parent_var]
categorical = categorical.groupby(parent_var ).agg(['sum', 'count', 'mean'])
column_names = []
for var in categorical.columns.levels[0]:
for stat in ['sum', 'count', 'mean']:
column_names.append('%s_%s_%s' %(df_name, var, stat))
categorical.columns = column_names
_, idx = np.unique(categorical, axis = 1, return_index = True)
categorical = categorical.iloc[:, idx]
return categorical | Home Credit Default Risk |
1,304,215 | lr_a = 1e-4<train_model> | def agg_child(df, parent_var, df_name):
    df_agg = agg_numeric(df, parent_var, df_name)
    df_agg_cat = agg_categorical(df, parent_var, df_name)
    df_info = df_agg.merge(df_agg_cat, on = parent_var, how = 'outer')
    _, idx = np.unique(df_info, axis = 1, return_index = True)
    df_info = df_info.iloc[:, idx]
    gc.enable()
    del df_agg, df_agg_cat
    gc.collect()
    return df_info | Home Credit Default Risk |
1,304,215 | learn.fit_one_cycle(5, slice(lr_a, lr/5))
<save_model> | def agg_grandchild(df, parent_df, parent_var, grandparent_var, df_name):
    parent_df = parent_df[[parent_var, grandparent_var]].copy().set_index(parent_var)
    df_agg = agg_numeric(df, parent_var, '%s_LOAN' % df_name)
    df_agg = df_agg.merge(parent_df, on = parent_var, how = 'left')
    df_agg_client = agg_numeric(df_agg, grandparent_var, '%s_CLIENT' % df_name)
    if any(df.dtypes == 'category'):
        df_agg_cat = agg_categorical(df, parent_var, '%s_LOAN' % df_name)
        df_agg_cat = df_agg_cat.merge(parent_df, on = parent_var, how = 'left')
        df_agg_cat_client = agg_numeric(df_agg_cat, grandparent_var, '%s_CLIENT' % df_name)
        df_info = df_agg_client.merge(df_agg_cat_client, on = grandparent_var, how = 'outer')
        gc.enable()
        del df_agg, df_agg_client, df_agg_cat, df_agg_cat_client
        gc.collect()
    else:
        df_info = df_agg_client.copy()
        gc.enable()
        del df_agg, df_agg_client
        gc.collect()
    _, idx = np.unique(df_info, axis = 1, return_index = True)
    df_info = df_info.iloc[:, idx]
    return df_info | Home Credit Default Risk |
1,304,215 | learn.save("model-2" )<train_model> | app['LOAN_RATE'] = app['AMT_ANNUITY'] / app['AMT_CREDIT']
app['CREDIT_INCOME_RATIO'] = app['AMT_CREDIT'] / app['AMT_INCOME_TOTAL']
app['EMPLOYED_BIRTH_RATIO'] = app['DAYS_EMPLOYED'] / app['DAYS_BIRTH']
app['EXT_SOURCE_SUM'] = app[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].sum(axis = 1)
app['EXT_SOURCE_MEAN'] = app[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis = 1)
app['AMT_REQ_SUM'] = app[[x for x in app.columns if 'AMT_REQ_' in x]].sum(axis = 1) | Home Credit Default Risk |
1,304,215 | learn.fit_one_cycle(5, slice(1e-5, lr/5))
<save_model> | bureau['LOAN_RATE'] = bureau['AMT_ANNUITY'] / bureau['AMT_CREDIT_SUM'] | Home Credit Default Risk |
1,304,215 | learn.save("model-3")<train_model> | bureau_info = agg_child(bureau, 'SK_ID_CURR', 'BUREAU')
bureau_info.head() | Home Credit Default Risk |
1,304,215 | learn.fit_one_cycle(5, slice(1e-5, lr/5))<load_pretrained> | bureau_balance['PAST_DUE'] = bureau_balance['STATUS'].isin(['1', '2', '3', '4', '5'])
bureau_balance['ON_TIME'] = bureau_balance['STATUS'] == '0' | Home Credit Default Risk |
1,304,215 | learn.load("model-3" )<predict_on_test> | bureau_balance_info = agg_grandchild(bureau_balance, bureau, 'SK_ID_BUREAU', 'SK_ID_CURR', 'BB')
del bureau_balance, bureau
bureau_balance_info.head() | Home Credit Default Risk |
1,304,215 | test_probs, _ = learn.get_preds(ds_type=DatasetType.Test)
test_preds = [data.classes[pred] for pred in np.argmax(test_probs.numpy(), axis=-1)]<save_to_csv> | app = app.set_index('SK_ID_CURR')
app = app.merge(bureau_info, on = 'SK_ID_CURR', how = 'left')
del bureau_info
app.shape | Home Credit Default Risk |
1,304,215 | sub_df.predicted_class = test_preds
sub_df.to_csv("submission.csv", index=False)
<save_to_csv> | app = app.merge(bureau_balance_info, on = 'SK_ID_CURR', how = 'left')
del bureau_balance_info
app.shape | Home Credit Default Risk |
1,304,215 | def create_download_link(df, title = "Download CSV file", filename = "data.csv"):
    csv = df.to_csv(index = False)
    b64 = base64.b64encode(csv.encode())
    payload = b64.decode()
    html = '<a download="{filename}" href="data:text/csv;base64,{payload}" target="_blank">{title}</a>'
    html = html.format(payload=payload, title=title, filename=filename)
    return HTML(html)
create_download_link(sub_df)<define_variables> | previous = pd.read_csv('../input/previous_application.csv').replace({365243: np.nan})
previous = convert_types(previous)
previous['LOAN_RATE'] = previous['AMT_ANNUITY'] / previous['AMT_CREDIT']
previous["AMT_DIFFERENCE"] = previous['AMT_CREDIT'] - previous['AMT_APPLICATION'] | Home Credit Default Risk |
1,304,215 | FileLink("/tmp/model/export.pkl")
<find_best_params> | app = app.merge(previous_info, on = 'SK_ID_CURR', how = 'left')
del previous_info
app.shape | Home Credit Default Risk |
1,304,215 | lr_find(learn)<save_model> | installments = pd.read_csv('../input/installments_payments.csv').replace({365243: np.nan})
installments = convert_types(installments)
installments['LATE'] = installments['DAYS_ENTRY_PAYMENT'] > installments['DAYS_INSTALMENT']
installments['LOW_PAYMENT'] = installments['AMT_PAYMENT'] < installments['AMT_INSTALMENT'] | Home Credit Default Risk |
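Likewise, `installments_info` appears in the following merge without being defined here. Since installments attach to clients through previous applications, the missing aggregation is presumably a grandchild rollup like the ones used for `bureau_balance` and `cash` (the `'IN'` prefix is a guess):

```python
# Hypothetical missing step: roll installments up to the client level
# through the previous-application table, as agg_grandchild does elsewhere.
installments_info = agg_grandchild(installments, previous, 'SK_ID_PREV', 'SK_ID_CURR', 'IN')
```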
1,304,215 | learn.save("model-2" )<load_pretrained> | app = app.merge(installments_info, on = 'SK_ID_CURR', how = 'left')
del installments_info
app.shape | Home Credit Default Risk |
1,304,215 | learn.load("model-2" )<load_pretrained> | cash = pd.read_csv('.. /input/POS_CASH_balance.csv' ).replace({365243: np.nan})
cash = convert_types(cash)
cash['LATE_PAYMENT'] = cash['SK_DPD'] > 0.0
cash['INSTALLMENTS_PAID'] = cash['CNT_INSTALMENT'] - cash['CNT_INSTALMENT_FUTURE'] | Home Credit Default Risk |
1,304,215 | data_1 = ImageDataBunch.from_folder(train_path, valid_pct=0.2, test=test_path, ds_tfms=get_transforms(), size=128).normalize(imagenet_stats)<feature_engineering> | cash_info = agg_grandchild(cash, previous, 'SK_ID_PREV', 'SK_ID_CURR', 'CASH')
del cash
cash_info.shape | Home Credit Default Risk |
1,304,215 | learn.data = data_1
learn = learn.to_fp16()<find_best_params> | app = app.merge(cash_info, on = 'SK_ID_CURR', how = 'left')
del cash_info
app.shape | Home Credit Default Risk |
1,304,215 | learn.freeze()
learn.lr_find()
learn.recorder.plot()<train_model> | credit = pd.read_csv('../input/credit_card_balance.csv').replace({365243: np.nan})
credit = convert_types(credit)
credit['OVER_LIMIT'] = credit['AMT_BALANCE'] > credit['AMT_CREDIT_LIMIT_ACTUAL']
credit['BALANCE_CLEARED'] = credit['AMT_BALANCE'] == 0.0
credit['LOW_PAYMENT'] = credit['AMT_PAYMENT_CURRENT'] < credit['AMT_INST_MIN_REGULARITY']
credit['LATE'] = credit['SK_DPD'] > 0.0 | Home Credit Default Risk |
1,304,215 | learn.fit_one_cycle(5, 1e-3)<find_best_params> | credit_info = agg_grandchild(credit, previous, 'SK_ID_PREV', 'SK_ID_CURR', 'CC')
del credit, previous
credit_info.shape | Home Credit Default Risk |
1,304,215 | learn.lr_find()
<train_model> | gc.collect()
gc.enable() | Home Credit Default Risk |
1,304,215 | learn.fit_one_cycle(2, 1e-6)<choose_model_class> | time.sleep(600)
app = app.merge(credit_info, on = 'SK_ID_CURR', how = 'left')
del credit_info
app.shape | Home Credit Default Risk |
1,304,215 | interp = ClassificationInterpretation.from_learner(learn)
<save_model> | print('After manual feature engineering, there are {} features.'.format(app.shape[1] - 2)) | Home Credit Default Risk |
1,304,215 | learn.save("model1" )<find_best_params> | gc.enable()
gc.collect() | Home Credit Default Risk |
1,304,215 | lr_find(learn)<load_from_csv> | app.to_csv('clean_manual_features.csv', chunksize = 100) | Home Credit Default Risk |
1,304,215 | df = pd.read_csv("submission.csv")<save_to_csv> | app.reset_index(inplace = True)
train, test = app[app['TARGET'].notnull()].copy(), app[app['TARGET'].isnull()].copy()
gc.enable()
del app
gc.collect() | Home Credit Default Risk |
1,304,215 | test_probs, _ = learn.get_preds(ds_type=DatasetType.Test)
test_preds = [data.classes[pred] for pred in np.argmax(test_probs.numpy(), axis=-1)]
sub_df.predicted_class = test_preds
sub_df.to_csv("submission.csv", index=False)
sub_df.head()<save_to_csv> | params = {'is_unbalance': True,
'n_estimators': 2673,
'num_leaves': 77,
'learning_rate': 0.00764,
'min_child_samples': 460,
'boosting_type': 'gbdt',
'subsample_for_bin': 240000,
'reg_lambda': 0.20,
'reg_alpha': 0.88,
'subsample': 0.95,
'colsample_bytree': 0.7} | Home Credit Default Risk |
1,304,215 | def create_download_link(df, title = "Download CSV file", filename = "data.csv"):
    csv = df.to_csv()
    b64 = base64.b64encode(csv.encode())
    payload = b64.decode()
    html = '<a download="{filename}" href="data:text/csv;base64,{payload}" target="_blank">{title}</a>'
    html = html.format(payload=payload, title=title, filename=filename)
    return HTML(html)
create_download_link(df)<import_modules> | train_labels = np.array(train.pop('TARGET')).reshape((-1,))
test_ids = list(test.pop('SK_ID_CURR'))
test = test.drop(columns = ['TARGET'])
train = train.drop(columns = ['SK_ID_CURR'])
print('Training shape: ', train.shape)
print('Testing shape: ', test.shape) | Home Credit Default Risk |
1,304,215 | learn.export('/tmp/model/learn.pkl')
<import_modules> | model = lgb.LGBMClassifier(**params)
model.fit(train, train_labels) | Home Credit Default Risk |
1,304,215 | from fastai.vision import *
from fastai.metrics import error_rate<create_dataframe> | preds = model.predict_proba(test)[:, 1]
submission = pd.DataFrame({'SK_ID_CURR': test_ids,
'TARGET': preds})
submission['SK_ID_CURR'] = submission['SK_ID_CURR'].astype(int)
submission['TARGET'] = submission['TARGET'].astype(float)
submission.to_csv('submission_manual.csv', index = False) | Home Credit Default Risk |
1,304,215 | train_arr = []
for file in glob.glob("../input/train/train/*/*"):
    train_arr.append({"name": file, "label": file.split("/")[-2]})
df = pd.DataFrame(train_arr)<load_from_csv> | features = list(train.columns)
fi = pd.DataFrame({'feature': features,
                   'importance': model.feature_importances_}) | Home Credit Default Risk |
1,296,130 | test_df = pd.read_csv(f"../input/sample_submission.csv")<count_values> | app_train = pd.read_csv(path + "application_train.csv")
app_train.head()
| Home Credit Default Risk |
1,296,130 | df["label"].value_counts()<set_options> | bureau = pd.read_csv(path + "bureau.csv")
bureau['YEAR'] = (bureau['DAYS_CREDIT'] / 365).abs().pow(0.5).round(0)
bureau.head()
| Home Credit Default Risk |
1,296,130 | init_notebook_mode(connected=True)
<install_modules> | bureau_balance = pd.read_csv(path + "bureau_balance.csv")
bureau_balance['YEAR'] = bureau_balance['MONTHS_BALANCE'].abs().pow(0.2).round(0)
bureau_balance.head(20)
| Home Credit Default Risk |
1,296,130 | !pip install imagesize<feature_engineering> | credit_card_balance = pd.read_csv(path + "credit_card_balance.csv")
credit_card_balance.head() | Home Credit Default Risk |
1,296,130 | df["width"] = 0
df["height"] = 0
df["aspect_ratio"] = 0.0
for idx, row in df.iterrows():
    width, height = imagesize.get(row["name"])
    df.at[idx, "width"] = width
    df.at[idx, "height"] = height
    df.at[idx, "aspect_ratio"] = float(height) / float(width)<load_pretrained> | pcb = pd.read_csv(path + "POS_CASH_balance.csv")
pcb.head() | Home Credit Default Risk |
1,296,130 | path = Path(".. /input")
SEED = 24
tfms = get_transforms(do_flip=True, max_rotate=10, max_zoom=1.3, max_lighting=0.4, max_warp=0.25, xtra_tfms=[rgb_randomize(channel=0, thresh=0.9, p=0.1),rgb_randomize(channel=2, thresh=0.9, p=0.1),rgb_randomize(channel=2, thresh=0.9, p=0.1)])
data = ImageDataBunch.from_folder(path/"train",valid_pct=0.2, ds_tfms=tfms, size=128, bs=64, seed=SEED ).normalize(imagenet_stats )<choose_model_class> | previous_application = pd.read_csv(path + "previous_application.csv")
previous_application.head() | Home Credit Default Risk |
1,296,130 | learn = cnn_learner(data, models.resnet34, metrics=[accuracy], model_dir="/tmp/model/")<train_model> | installments_payments = pd.read_csv(path + "installments_payments.csv")
installments_payments.head() | Home Credit Default Risk |
1,296,130 | lr = 5e-2
learn.fit_one_cycle(15, slice(lr))<save_model> | app_test = pd.read_csv('../input/application_test.csv')
app_test['is_test'] = 1
app_test['is_train'] = 0
app_train['is_test'] = 0
app_train['is_train'] = 1
Y = app_train['TARGET']
train_X = app_train.drop(['TARGET'], axis = 1)
test_id = app_test['SK_ID_CURR']
test_X = app_test
data = pd.concat([train_X, test_X], axis=0).set_index('SK_ID_CURR') | Home Credit Default Risk |
1,296,130 | learn.save('stage1')<load_pretrained> | def _get_categorical_features(df):
    feats = [col for col in list(df.columns) if df[col].dtype == 'object']
    return feats
def _factorize_categoricals(df, cats):
    for col in cats:
        df[col], _ = pd.factorize(df[col])
    return df
def _get_dummies(df, cats):
    for col in cats:
        df = pd.concat([df, pd.get_dummies(df[col], prefix=col)], axis=1)
    return df
data_cats = _get_categorical_features(data)
prev_app_cats = _get_categorical_features(previous_application)
bureau_cats = _get_categorical_features(bureau)
pcb_cats = _get_categorical_features(pcb)
ccbal_cats = _get_categorical_features(credit_card_balance)
previous_application = _get_dummies(previous_application, prev_app_cats)
bureau = _get_dummies(bureau, bureau_cats)
pcb = _get_dummies(pcb, pcb_cats)
credit_card_balance = _get_dummies(credit_card_balance, ccbal_cats)
data = _factorize_categoricals(data, data_cats) | Home Credit Default Risk |
1,296,130 | learn.load('stage1')<train_model> | prev_apps_count = previous_application[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
previous_application['SK_ID_PREV'] = previous_application['SK_ID_CURR'].map(prev_apps_count['SK_ID_PREV'])
prev_apps_avg = previous_application.groupby('SK_ID_CURR').mean()
data = data.merge(right=prev_apps_avg.reset_index(), how='left', on='SK_ID_CURR')
data.head() | Home Credit Default Risk |
1,296,130 | learn.fit_one_cycle(10, slice(7e-6, (7e-6)/10))<save_model> | bjoined = bureau_balance.merge(right=bureau[['AMT_CREDIT_SUM', 'SK_ID_BUREAU', 'SK_ID_CURR']].reset_index(), how='inner', on='SK_ID_BUREAU')
bjoined = bjoined.merge(right=app_train[['AMT_CREDIT', 'SK_ID_CURR']].reset_index(), how='inner', on='SK_ID_CURR')
bjoined['AMT_WEIGHT'] = (bjoined['AMT_CREDIT_SUM'] / bjoined['AMT_CREDIT']).pow(.04).round(1)
bpv = pd.pivot_table(bjoined[['SK_ID_CURR', 'AMT_WEIGHT', 'YEAR']][(bjoined.AMT_WEIGHT < 1.2) & (bjoined.AMT_WEIGHT > .8)], index=['SK_ID_CURR'], columns=['AMT_WEIGHT'], aggfunc=len, fill_value=0)
bjoined.head()
| Home Credit Default Risk |
1,296,130 | learn.save('stage-2' )<load_pretrained> | bflattened = pd.DataFrame(bpv.to_records() ).set_index(['SK_ID_CURR'])
bflattened.columns = ['year_weight_table_' + col for col in bflattened.columns ]
bflattened.head()
| Home Credit Default Risk |
1,296,130 | path = Path(".. /input")
SEED = 24
tfms = get_transforms(do_flip=True, max_rotate=10, max_zoom=1.3, max_lighting=0.4, max_warp=0.25, xtra_tfms=[rgb_randomize(channel=0, thresh=0.9, p=0.1),rgb_randomize(channel=2, thresh=0.9, p=0.1),rgb_randomize(channel=2, thresh=0.9, p=0.1)])
data = ImageDataBunch.from_folder(path/"train",valid_pct=0.2, ds_tfms=tfms, size=256, bs=64, seed=SEED ).normalize(imagenet_stats )<prepare_output> | data = data.merge(right=bflattened.reset_index() , how='left', on='SK_ID_CURR')
gc.collect()
data.head()
| Home Credit Default Risk |
1,296,130 | learn.data = data<define_search_space> | | Home Credit Default Risk |
1,296,130 | lr = 7e-3<train_model> | bjoined = None
gc.collect()
bbgrouped = bflattened.groupby('SK_ID_CURR').sum(min_count=1)
bbgrouped.columns = ['bbpv_sum_' + hdr.replace("('", "_").replace("',", "_").replace(")", "") \
                     for hdr in bbgrouped.columns]
bbgrouped.head()
data = data.merge(right=bbgrouped.reset_index(), how='left', on='SK_ID_CURR')
data.head()
data.head(2)
| Home Credit Default Risk |
1,296,130 | learn.fit_one_cycle(15, slice(lr))<save_model> | bflattened = None
bbgrouped = None
gc.collect()
bjoined = bureau.merge(right=app_train[['AMT_CREDIT', 'SK_ID_CURR']].reset_index(), how='inner', on='SK_ID_CURR')
bjoined['AMT_WEIGHT'] = bjoined['AMT_CREDIT_SUM'] / bjoined['AMT_CREDIT']
bjoined.head()
| Home Credit Default Risk |
1,296,130 | learn.save('stage-3')<load_pretrained> | pv = pd.pivot_table(bjoined, index='SK_ID_CURR', columns=['CREDIT_ACTIVE', 'YEAR'], \
                    values=['AMT_WEIGHT', \
                            'AMT_CREDIT_MAX_OVERDUE', \
                            'DAYS_CREDIT_ENDDATE'], aggfunc='sum')
flattened = pd.DataFrame(pv.to_records()).set_index('SK_ID_CURR')
flattened.columns = ['bpv_' + hdr.replace("('", "_").replace("',", "_").replace(")", "") \
                     for hdr in flattened.columns]
pv = None
flattened.head(10)
| Home Credit Default Risk |
1,296,130 | learn.load('stage-3')<train_model> | bjoined = None
gc.collect()
data.columns
data.set_index('SK_ID_CURR')
data = data.merge(right=flattened.reset_index(), how='left', on='SK_ID_CURR')
data.head() | Home Credit Default Risk |
1,296,130 | learn.fit_one_cycle(10, slice(1e-5, 1e-4))<load_from_csv> | bureau_avg = bureau.groupby('SK_ID_CURR').mean()
bureau_avg['buro_count'] = bureau[['SK_ID_BUREAU', 'SK_ID_CURR']].groupby('SK_ID_CURR').count()['SK_ID_BUREAU']
fields = ['AMT_CREDIT_SUM', 'AMT_CREDIT_SUM_DEBT', 'CREDIT_DAY_OVERDUE', 'DAYS_CREDIT_ENDDATE', 'DAYS_CREDIT']
dates = [300, 2000]
for f in fields:
    lasti = 0
    for i in dates:
        bureau_avg['buro_wind_' + f + str(i)] = (bureau[(bureau.DAYS_CREDIT_ENDDATE < i) & (bureau.DAYS_CREDIT_ENDDATE > lasti)])[[f, 'SK_ID_CURR']].groupby('SK_ID_CURR').sum()
        bureau_avg['buro_' + f + str(i)] = (bureau[bureau.DAYS_CREDIT_ENDDATE < i])[[f, 'SK_ID_CURR']].groupby('SK_ID_CURR').sum()
        if lasti > 0:
            bureau_avg['buro_derivative_' + f + str(i)] = bureau_avg['buro_' + f + str(lasti)] / (bureau_avg['buro_' + f + str(i)] + 0.0001)
        lasti = i
bureau_avg.columns = ['b_' + f_ for f_ in bureau_avg.columns]
bureau_avg.head()
data = data.merge(right=bureau_avg.reset_index(), how='left', on='SK_ID_CURR')
for f in fields:
    for i in dates:
        data['b_buro_weighted_' + f + str(i)] = (data['b_buro_' + f + str(i)] / data['AMT_CREDIT_x'])
data.head(10)
| Home Credit Default Risk |
1,296,130 | path = ".. /input"
test_df = pd.read_csv(f"{path}/sample_submission.csv")
sub_df = pd.read_csv(f"{path}/sample_submission.csv")
data.add_test(ImageList.from_df(test_df, path, folder="test/test"))<predict_on_test> | flattened = None
bureau_avg = None
gc.collect()
cnt_inst = installments_payments[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
installments_payments['SK_ID_PREV'] = installments_payments['SK_ID_CURR'].map(cnt_inst['SK_ID_PREV'])
avg_inst = installments_payments.groupby('SK_ID_CURR').mean()
avg_inst.columns = ['i_' + f_ for f_ in avg_inst.columns]
data = data.merge(right=avg_inst.reset_index(), how='left', on='SK_ID_CURR')
| Home Credit Default Risk |
1,296,130 | test_probs, _ = learn.get_preds(ds_type=DatasetType.Test)
test_preds = [data.classes[pred] for pred in np.argmax(test_probs.numpy(), axis=-1)]<save_to_csv> | cnt_inst = None
avg_inst = None
gc.collect()
pcb_count = pcb[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
pcb['SK_ID_PREV'] = pcb['SK_ID_CURR'].map(pcb_count['SK_ID_PREV'])
pcb_avg = pcb.groupby('SK_ID_CURR').mean()
fields = ['CNT_INSTALMENT_FUTURE']
dates = [-12, -36]
for f in fields:
    lasti = 0
    for i in dates:
        pcb_avg['s_' + f + str(i)] = (pcb[(pcb.MONTHS_BALANCE < i)])[[f, 'SK_ID_CURR']].groupby('SK_ID_CURR').sum()
data = data.merge(right=pcb_avg.reset_index(), how='left', on='SK_ID_CURR')
for f in fields:
    for i in dates:
        data['pcb' + '_wg_' + f + str(i)] = (data['s_' + f + str(i)] / data['AMT_CREDIT_x'])
data.head()
| Home Credit Default Risk |
1,296,130 | sub_df = pd.read_csv(f"{path}/sample_submission.csv")
sub_df.predicted_class = test_preds
sub_df.to_csv("submission.csv", index=False )<set_options> | pcb_avg = None
gc.collect()
nb_prevs = credit_card_balance[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count()
credit_card_balance['SK_ID_PREV'] = credit_card_balance['SK_ID_CURR'].map(nb_prevs['SK_ID_PREV'])
avg_cc_bal = credit_card_balance.groupby('SK_ID_CURR').mean()
fields = ['AMT_BALANCE', 'AMT_DRAWINGS_ATM_CURRENT']
dates = [-12, -36]
for f in fields:
    lasti = 0
    for i in dates:
        avg_cc_bal['s_' + f + str(i)] = (credit_card_balance[(credit_card_balance.MONTHS_BALANCE < i)])[[f, 'SK_ID_CURR']].groupby('SK_ID_CURR').sum()
avg_cc_bal.columns = ['cc_bal_' + f_ for f_ in avg_cc_bal.columns]
data = data.merge(right=avg_cc_bal.reset_index(), how='left', on='SK_ID_CURR')
for f in ['AMT_BALANCE']:
    for p in ['cc_bal_s_']:
        for i in dates:
            data[p + '_wg_' + f + str(i)] = (data[p + f + str(i)] / data['AMT_CREDIT_x'])
data.head(10)
avg_cc_bal.head()
| Home Credit Default Risk |
1,296,130 | pd.set_option("display.max_columns", 500)
pd.set_option("display.max_rows", 200)
plt.rcParams['figure.figsize'] = [15, 6]
sns.set_style("darkgrid" )<install_modules> | nb_prevs = None
avg_cc_bal = None
gc.collect()
| Home Credit Default Risk |
1,296,130 | !pip install pandas-profiling<load_from_csv> | ignore_features = ['SK_ID_CURR', 'is_train', 'is_test']
relevant_features = [col for col in data.columns if col not in ignore_features]
trainX = data[data['is_train'] == 1][relevant_features]
testX = data[data['is_test'] == 1][relevant_features]
x_train, x_val, y_train, y_val = train_test_split(trainX, Y, test_size=0.2, random_state=18)
lgb_train = lgb.Dataset(data=x_train, label=y_train)
lgb_eval = lgb.Dataset(data=x_val, label=y_val) | Home Credit Default Risk |
1,296,130 | online_sales = pd.read_csv('/kaggle/input/uisummerschool/Online_sales.csv', sep=',')
online_sales.head()<define_variables> | params = {'task': 'train', 'boosting_type': 'gbdt', 'objective': 'binary', 'metric': 'auc',
          'learning_rate': 0.01, 'num_leaves': 48, 'num_iteration': 5000, 'verbose': 0,
          'colsample_bytree': .8, 'subsample': .9, 'max_depth': 7, 'reg_alpha': .1, 'reg_lambda': .1,
          'min_split_gain': .01, 'min_child_weight': 1}
model = lgb.train(params, lgb_train, valid_sets=lgb_eval, early_stopping_rounds=150, verbose_eval=200) | Home Credit Default Risk |
1,296,130 | <define_variables><EOS> | preds = model.predict(testX)
sub_lgb = pd.DataFrame()
sub_lgb['SK_ID_CURR'] = test_id
sub_lgb['TARGET'] = preds
sub_lgb.to_csv("lgb_baseline.csv", index=False)
sub_lgb.head() | Home Credit Default Risk |
1,249,981 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<filter> | N_FOLDS = 5
MAX_EVALS = 5 | Home Credit Default Risk |
1,249,981 | condition1 = online_sales['Product SKU'] == 'GGOENEBQ079099'
online_sales[(condition1)]
condition2 = online_sales['Quantity'] > 2
online_sales[(condition1) & (condition2)]<feature_engineering> | features = pd.read_csv('../input/home-credit-default-risk/application_train.csv')
features = features.sample(n = 16000, random_state = 42)
features = features.select_dtypes('number')
labels = np.array(features['TARGET'].astype(np.int32)).reshape((-1,))
features = features.drop(columns = ['TARGET', 'SK_ID_CURR'])
train_features, test_features, train_labels, test_labels = train_test_split(features, labels, test_size = 6000, random_state = 50) | Home Credit Default Risk |
1,249,981 | test = online_sales[['Date', 'Product SKU', 'Quantity', 'Revenue', 'Tax', 'Delivery']]
test['Net_Income'] = test['Revenue'] - test['Tax'] - test['Delivery']
test.head()<feature_engineering> | train_set = lgb.Dataset(data = train_features, label = train_labels)
test_set = lgb.Dataset(data = test_features, label = test_labels) | Home Credit Default Risk |
1,249,981 | kondisi = test['Tax'].isnull()
test.loc[kondisi, ['Tax']] = 1<groupby> | model = lgb.LGBMClassifier()
default_params = model.get_params()
del default_params['n_estimators']
cv_results = lgb.cv(default_params, train_set, num_boost_round = 10000, early_stopping_rounds = 100,
                    metrics = 'auc', nfold = N_FOLDS, seed = 42) | Home Credit Default Risk |
1,249,981 | test = online_sales.groupby(['Date'])['Quantity'].sum().reset_index()
test.head()<groupby> | print('The maximum validation ROC AUC was: {:.5f} with a standard deviation of {:.5f}.'.format(cv_results['auc-mean'][-1], cv_results['auc-stdv'][-1]))
print('The optimal number of boosting rounds (estimators) was {}.'.format(len(cv_results['auc-mean']))) | Home Credit Default Risk |
1,249,981 | test = online_sales.groupby(['Date', 'Product SKU'])['Quantity'].sum().reset_index()
test.head()<groupby> | from sklearn.metrics import roc_auc_score | Home Credit Default Risk |
1,249,981 | test = online_sales.groupby(['Date']).agg({'Quantity': 'sum',
    'Revenue': 'sum',
    'Tax': 'sum',
    'Product SKU': 'count',
    'Transaction ID': 'count',
}).reset_index()
test.head()<sort_values> | model.n_estimators = len(cv_results['auc-mean'])
model.fit(train_features, train_labels)
preds = model.predict_proba(test_features)[:, 1]
baseline_auc = roc_auc_score(test_labels, preds)
print('The baseline model scores {:.5f} ROC AUC on the test set.'.format(baseline_auc)) | Home Credit Default Risk |
1,249,981 | online_sales.sort_values(by=['Quantity'], ascending = False).head(15)<drop_column> | def objective(hyperparameters, iteration):
    if 'n_estimators' in hyperparameters.keys():
        del hyperparameters['n_estimators']
    cv_results = lgb.cv(hyperparameters, train_set, num_boost_round = 10000, nfold = N_FOLDS,
                        early_stopping_rounds = 100, metrics = 'auc', seed = 42)
    score = cv_results['auc-mean'][-1]
    estimators = len(cv_results['auc-mean'])
    hyperparameters['n_estimators'] = estimators
    return [score, hyperparameters, iteration] | Home Credit Default Risk |
1,249,981 | test.rename(index=str, columns={"Quantity": "Total Quantity", "Revenue": "Total Revenue"}, inplace = True)
test.drop(columns=['Product SKU', 'Transaction ID'], inplace = True)
test.head()<load_from_csv> | score, params, iteration = objective(default_params, 1)
print('The cross-validation ROC AUC was {:.5f}.'.format(score)) | Home Credit Default Risk |
1,249,981 | online_sales = pd.read_csv('/kaggle/input/uisummerschool/Online_sales.csv')
backup = online_sales.copy()
daily_online_revenue = online_sales.groupby(['Date'])['Revenue'].sum().reset_index()
daily_online_revenue.tail()<create_dataframe> | model = lgb.LGBMModel()
model.get_params() | Home Credit Default Risk |
1,249,981 | add_data = [['2017-12-01', 0], ['2017-12-02', 0], ['2017-12-03', 0],
['2017-12-04', 0], ['2017-12-05', 0], ['2017-12-06', 0],
['2017-12-07', 0], ['2017-12-08', 0], ['2017-12-09', 0],
['2017-12-10', 0], ['2017-12-11', 0], ['2017-12-12', 0],
['2017-12-13', 0], ['2017-12-14', 0]
]
add_data_df = pd.DataFrame(add_data, columns = ['Date', 'Revenue'])
add_data_df['Date'] = add_data_df['Date'].astype(str)
add_data_df['Date'] = pd.to_datetime(add_data_df['Date'])
daily_online_revenue = daily_online_revenue.append(add_data_df)
df_rev = daily_online_revenue.copy()
daily_online_revenue.tail(20)<feature_engineering> | param_grid = {
    'boosting_type': ['gbdt', 'goss', 'dart'],
    'num_leaves': list(range(20, 150)),
    'learning_rate': list(np.logspace(np.log10(0.005), np.log10(0.5), base = 10, num = 1000)),
    'subsample_for_bin': list(range(20000, 300000, 20000)),
    'min_child_samples': list(range(20, 500, 5)),
    'reg_alpha': list(np.linspace(0, 1)),
    'reg_lambda': list(np.linspace(0, 1)),
    'colsample_bytree': list(np.linspace(0.6, 1, 10)),
    'subsample': list(np.linspace(0.5, 1, 100)),
    'is_unbalance': [True, False]
} | Home Credit Default Risk |
1,249,981 | daily_online_revenue['d-1_rev'] = daily_online_revenue['Revenue'].shift(1)
daily_online_revenue['d-2_rev'] = daily_online_revenue['Revenue'].shift(2)
daily_online_revenue['d-3_rev'] = daily_online_revenue['Revenue'].shift(3)
daily_online_revenue['d-4_rev'] = daily_online_revenue['Revenue'].shift(4)
daily_online_revenue['d-5_rev'] = daily_online_revenue['Revenue'].shift(5)
daily_online_revenue = daily_online_revenue.dropna()<split> | a = 0
b = 0
for x in param_grid['learning_rate']:
    if x >= 0.005 and x < 0.05:
        a += 1
    elif x >= 0.05 and x < 0.5:
        b += 1
print('There are {} values between 0.005 and 0.05'.format(a))
print('There are {} values between 0.05 and 0.5'.format(b)) | Home Credit Default Risk |
1,249,981 | train_data = daily_online_revenue.loc[:'2017-11-30']
test_data = daily_online_revenue.loc['2017-12-1':]<prepare_x_and_y> | random_results = pd.DataFrame(columns = ['score', 'params', 'iteration'],
index = list(range(MAX_EVALS)))
grid_results = pd.DataFrame(columns = ['score', 'params', 'iteration'],
index = list(range(MAX_EVALS)) ) | Home Credit Default Risk |
1,249,981 | x_train = train_data.drop('Revenue', 1)
df = train_data[['Revenue']].to_string(index=False).split('\n')
y_train = pd.DataFrame({'Revenue': df})
y_train = y_train.drop(y_train.index[[0]])
x_test = test_data.drop('Revenue', 1)
df = test_data[['Revenue']].to_string(index=False).split('\n')
y_test = pd.DataFrame({'Revenue': df})
y_test = y_test.drop(y_test.index[[0]])
y_test
<train_model> | com = 1
for x in param_grid.values():
    com *= len(x)
print('There are {} combinations'.format(com)) | Home Credit Default Risk |
1,249,981 | def fit(x_train, y_train):
    model = RandomForestRegressor(random_state=1)
    model.fit(x_train, y_train)
    return model
def predict(model, x_test):
    y_pred = model.predict(x_test)
    return y_pred
model = fit(x_train, y_train)
<categorify> | print('This would take {:.0f} years to finish.'.format((100 * com) / (60 * 60 * 24 * 365))) | Home Credit Default Risk |
1,249,981 | def preprocess(dataset):
    processed_dataset = dataset.copy()
    processed_dataset['d-1_rev'] = processed_dataset['Revenue'].shift(1)
    processed_dataset['d-2_rev'] = processed_dataset['Revenue'].shift(2)
    processed_dataset['d-3_rev'] = processed_dataset['Revenue'].shift(3)
    processed_dataset['d-4_rev'] = processed_dataset['Revenue'].shift(4)
    processed_dataset['d-5_rev'] = processed_dataset['Revenue'].shift(5)
    processed_dataset = processed_dataset.dropna()
    return processed_dataset
def split_label_and_predictor(train_or_test_data):
    x_data = train_or_test_data.drop('Revenue', 1)
    df = train_or_test_data[['Revenue']].to_string(index=False).split('\n')
    y_data = pd.DataFrame({'Revenue': df})
    y_data = y_data.drop(y_data.index[[0]])
    return x_data, y_data
def split_train_test(dataset, end_of_training_date):
    training_data = dataset.loc[:end_of_training_date]
    testing_data = dataset.loc["2017-12-1":]
    return training_data, testing_data
df_rev2 = df_rev.copy()
n_iteration = len(x_test)
result = []
for i in range(n_iteration):
    y_pred = predict(model, pd.DataFrame(x_test.iloc[i]).transpose())
    result.append(y_pred[0])
    df_rev2.loc[df_rev2["Date"] == x_test.index[i], "Revenue"] = y_pred
daily_online_revenue = preprocess(df_rev2).set_index('Date')
_, testing_data = split_train_test(daily_online_revenue, end_of_training_date)
x_test, _ = split_label_and_predictor(testing_data)
result<compute_test_metric> | grid_results = grid_search(param_grid)
print('The best validation score was {:.5f}'.format(grid_results.loc[0, 'score']))
print('\nThe best hyperparameters were:')
pprint.pprint(grid_results.loc[0, 'params']) | Home Credit Default Risk |
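`grid_search` is invoked above but never defined in the visible rows. Judging from the `objective` helper and the `random_search` function that appears further down, a plausible reconstruction walks the Cartesian product of `param_grid` for up to `MAX_EVALS` trials; treat this as an assumed sketch, not the kernel's actual code:

```python
import itertools
import pandas as pd

def grid_search(param_grid, max_evals=MAX_EVALS):
    """Assumed reconstruction: score successive grid combinations with objective()."""
    results = pd.DataFrame(columns=['score', 'params', 'iteration'],
                           index=list(range(max_evals)))
    keys, values = zip(*param_grid.items())
    for i, v in enumerate(itertools.product(*values)):
        if i >= max_evals:
            break
        hyperparameters = dict(zip(keys, v))
        # 'goss' boosting has no bagging, so subsample must stay at 1.0
        if hyperparameters['boosting_type'] == 'goss':
            hyperparameters['subsample'] = 1.0
        results.loc[i, :] = objective(hyperparameters, i)
    results.sort_values('score', ascending=False, inplace=True)
    results.reset_index(inplace=True)
    return results
```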
1,249,981 | comparison = pd.DataFrame({"Prediction":result,"Actual":y_test['Revenue']})
comparison.index = y_test.index
error = sqrt(mean_squared_error(comparison["Actual"], comparison["Prediction"]))
print("Error Score(RMSE)= {}".format(round(error,2)))
historical = pd.DataFrame(y_train ).rename(columns={"Revenue":"Actual"} ).tail(14)
pd.concat([historical,comparison],sort=True ).plot() ;<save_to_csv> | grid_search_params = grid_results.loc[0, 'params']
model = lgb.LGBMClassifier(**grid_search_params, random_state=42)
model.fit(train_features, train_labels)
preds = model.predict_proba(test_features)[:, 1]
print('The best model from grid search scores {:.5f} ROC AUC on the test set.'.format(roc_auc_score(test_labels, preds))) | Home Credit Default Risk |
1,249,981 | formatted_result = pd.DataFrame(result).reset_index().rename(columns={"index": "Id", 0: "Revenue"})
display(formatted_result)
formatted_result[['Id', 'Revenue']].to_csv("result.csv", index=False)<load_from_csv> | pd.options.display.max_colwidth = 1000
grid_results['params'].values | Home Credit Default Risk |
1,249,981 | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
sample = pd.read_csv('../input/test.csv')<prepare_x_and_y> | random.seed(50)
random_params = {k: random.sample(v, 1)[0] for k, v in param_grid.items()}
random_params['subsample'] = 1.0 if random_params['boosting_type'] == 'goss' else random_params['subsample']
random_params | Home Credit Default Risk |
1,249,981 | x_train = train[['LotArea','LotFrontage']].copy()
y_train = train['SalePrice'].copy()<prepare_x_and_y> | def random_search(param_grid, max_evals = MAX_EVALS):
    results = pd.DataFrame(columns = ['score', 'params', 'iteration'],
                           index = list(range(MAX_EVALS)))
    for i in range(MAX_EVALS):
        hyperparameters = {k: random.sample(v, 1)[0] for k, v in param_grid.items()}
        hyperparameters['subsample'] = 1.0 if hyperparameters['boosting_type'] == 'goss' else hyperparameters['subsample']
        eval_results = objective(hyperparameters, i)
        results.loc[i, :] = eval_results
    results.sort_values('score', ascending = False, inplace = True)
    results.reset_index(inplace = True)
    return results | Home Credit Default Risk |
1,249,981 | x_test = test[['LotArea','LotFrontage']].copy()<count_missing_values> | random_results = random_search(param_grid)
print('The best validation score was {:.5f}'.format(random_results.loc[0, 'score']))
print('\nThe best hyperparameters were:')
pprint.pprint(random_results.loc[0, 'params']) | Home Credit Default Risk |
1,249,981 | x_train.isnull().sum()<correct_missing_values> | random_search_params = random_results.loc[0, 'params']
model = lgb.LGBMClassifier(**random_search_params, random_state = 42)
model.fit(train_features, train_labels)
preds = model.predict_proba(test_features)[:, 1]
print('The best model from random search scores {:.5f} ROC AUC on the test set.'.format(roc_auc_score(test_labels, preds))) | Home Credit Default Risk |