kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
1,276,329 | def conv3x3(in_planes, out_planes, stride=1, padding=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3,
stride=stride, padding=padding, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class SSELayer(nn.Module):
def __init__(self, channel):
super().__init__()
self.conv = nn.Sequential(
conv1x1(channel, 1),
nn.Sigmoid()
)
def forward(self, x):
y = self.conv(x)
return x * y.expand_as(x)
class CSELayer(nn.Module):
def __init__(self, channel, reduction=16):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
class SCSELayer(nn.Module):
def __init__(self, channel, reduction=16):
super().__init__()
self.sse = SSELayer(channel)
self.cse = CSELayer(channel, reduction)
def forward(self, x):
return self.sse(x) + self.cse(x)
def se_layer(channel, se_type=None, reduction=16):
if se_type is None:
return nn.Identity()
elif se_type == 'cse':
return CSELayer(channel, reduction)
elif se_type == 'sse':
return SSELayer(channel)
elif se_type == 'scse':
return SCSELayer(channel, reduction)
else:
raise NotImplementedError
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
norm_layer=None, se_type=None, reduction=16):
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.se = se_layer(planes, se_type=se_type, reduction=reduction)
self.downsample = downsample
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.se(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=10, width_per_group=64,
se_type=None, reduction=16):
super(ResNet, self).__init__()
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.base_width = width_per_group
self.se_type = se_type
self.reduction = reduction
self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
norm_layer = self._norm_layer
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample=downsample, norm_layer=norm_layer,
se_type=self.se_type, reduction=self.reduction))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, norm_layer=norm_layer,
se_type=self.se_type, reduction=self.reduction))
return nn.Sequential(*layers)
def _forward_impl(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def small_resnet18(se_type='scse'):
return ResNet(BasicBlock, [2, 2, 2, 2], se_type=se_type)
def small_resnet34(se_type='scse'):
return ResNet(BasicBlock, [3, 4, 6, 3], se_type=se_type)<compute_train_metric> | if run_mode == 'LGBM_KFold':
folds = KFold(n_splits=10, shuffle=True, random_state=1024)
oof_preds = np.zeros(train.shape[0])
sub_preds = np.zeros(test.shape[0])
feature_importance = pd.DataFrame()
feats = train.columns
start = time()
for n_fold, (train_index, valid_index) in enumerate(folds.split(train, target)):
train_x, train_y = train.iloc[train_index], target.iloc[train_index]
valid_x, valid_y = train.iloc[valid_index], target.iloc[valid_index]
clf = lgb.LGBMClassifier(
nthread=4,
n_estimators=10000,
learning_rate=0.02,
num_leaves=34,
colsample_bytree=0.9497036,
subsample=0.8715623,
max_depth=8,
reg_alpha=0.041545473,
reg_lambda=0.0735294,
min_split_gain=0.0222415,
min_child_weight=39.3259775,
silent=-1,
verbose=-1,
)
clf.fit(
train_x,
train_y,
eval_set = [(valid_x, valid_y)],
eval_metric = 'auc',
verbose = 200,
early_stopping_rounds = 500,
)
oof_preds[valid_index] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]  # LightGBM's predict keyword is num_iteration (singular)
sub_preds += clf.predict_proba(test, num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
fold_importance = pd.DataFrame()
fold_importance['feature'] = feats
fold_importance['importance'] = clf.feature_importances_
fold_importance['fold'] = n_fold + 1
feature_importance = pd.concat([feature_importance, fold_importance], axis=0)
print('Fold {:02d} AUC: {:.6f}'.format(n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_index])))
end = time()
print("
Estimator fit time: {} seconds".format(int(round(end - start))))
print('Full AUC score: {:.6f}'.format(roc_auc_score(target, oof_preds)) ) | Home Credit Default Risk |
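The SE-ResNet above wires the squeeze-and-excitation gates into each BasicBlock; a quick shape check catches wiring mistakes early. This is a hedged smoke test, not part of the original kernel, assuming torch and torch.nn as nn are imported; the single-channel 28x28 input matches the conv1 definition.
model = small_resnet18(se_type='scse')
x = torch.randn(4, 1, 28, 28)   # batch of 4 grayscale images
logits = model(x)               # SCSE gating runs inside every residual block
assert logits.shape == (4, 10)  # num_classes defaults to 10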
1,276,329 | class AverageMeter:
def __init__(self):
self.reset()
def reset(self):
self.val = 0.0
self.avg = 0.0
self.sum = 0.0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy_score_torch(y_pred, y):
y_pred = torch.argmax(y_pred, axis=1).cpu().numpy()
y = y.cpu().numpy()
return accuracy_score(y, y_pred)
def rand_bbox(size, lam):
W = size[2]
H = size[3]
cut_rat = np.sqrt(1. - lam)
cut_w = int(W * cut_rat)  # np.int was removed in NumPy 1.24; the built-in int behaves the same here
cut_h = int(H * cut_rat)
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2<train_model> | if run_mode == 'LGBM_KFold':
submission = pd.DataFrame()
submission['SK_ID_CURR'] = test_id
submission['TARGET'] = sub_preds
submission.to_csv('LGBM_SKFold.csv', index=False) | Home Credit Default Risk |
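rand_bbox above is the CutMix cropping helper. A hedged sketch of how it is typically wired into a training step (model, criterion, x, y are assumed from the train() loop; the Beta(1, 1) draw is the usual CutMix choice, not taken from this kernel):
lam = np.random.beta(1.0, 1.0)                     # mixing ratio
rand_index = torch.randperm(x.size(0))             # partner sample for each image
bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam)
x[:, :, bbx1:bbx2, bby1:bby2] = x[rand_index, :, bbx1:bbx2, bby1:bby2]
lam = 1 - (bbx2 - bbx1) * (bby2 - bby1) / (x.size(2) * x.size(3))  # correct lam for box clipping
output = model(x)
loss = lam * criterion(output, y) + (1 - lam) * criterion(output, y[rand_index])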
1,276,329 | def train(
params,
model,
optimizer,
criterion,
dataloader,
parent_bar=None,
):
model.train()
losses = AverageMeter()
metrics = AverageMeter()
for x, y in progress_bar(dataloader, parent=parent_bar):
x = x.to(dtype=torch.float32, device=DEVICE)
y = y.to(dtype=torch.long, device=DEVICE)
optimizer.zero_grad()
y_pred = model(x)
loss = criterion(y_pred, y)
loss.backward()
optimizer.step()
losses.update(loss.item())
metrics.update(accuracy_score_torch(y_pred, y))
return losses.avg, metrics.avg
def valid(
params,
model,
criterion,
dataloader,
):
model.eval()
losses = AverageMeter()
metrics = AverageMeter()
for x, y in dataloader:
x = x.to(dtype=torch.float32, device=DEVICE)
y = y.to(dtype=torch.long, device=DEVICE)
with torch.no_grad():
y_pred = model(x)
loss = criterion(y_pred, y)
losses.update(loss.item())
metrics.update(accuracy_score_torch(y_pred, y))
return losses.avg, metrics.avg<init_hyperparams> | if run_mode == 'train_estimator_RFR':
estimator = RandomForestRegressor(n_estimators=125,
max_features=0.2,
min_samples_split=2,
min_samples_leaf=75,
n_jobs=-1,
random_state=42,
verbose=0)
print("
Preparing to train the following estimator:
{}".format(estimator))
start = time()
estimator.fit(X_train, y_train)
end = time()
print("
Estimator fit time: {} seconds".format(int(round(end - start))))
pred_val = estimator.predict(X_val)
print("
Estimator prediction score on Validation set: \t{}".format(roc_auc_score(y_val, pred_val)))
fi = pd.DataFrame()
fi['feature'] = X_train.columns
fi['importance'] = estimator.feature_importances_
display(fi.sort_values(by=['importance'], ascending=False).head(10))
| Home Credit Default Risk |
1,276,329 | <split><EOS> | if 'train_estimator_' in run_mode:
pred_test = estimator.predict(test)
print("
Preparing prediction for submission.")
submission = pd.DataFrame()
submission['SK_ID_CURR'] = test_id
submission['TARGET'] = pred_test
submission.head()
file_name = run_mode.split('train_estimator_')[1] + '.csv'
submission.to_csv(file_name, index=False ) | Home Credit Default Risk |
1,185,243 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<compute_test_metric> | warnings.simplefilter(action='ignore', category=FutureWarning)
py.init_notebook_mode(connected=True)
%matplotlib inline
cf.go_offline()
| Home Credit Default Risk |
1,185,243 | cv_score = accuracy_score(train_df[TARGET], np.argmax(oof, axis=1))
logger.info(f'CV: {cv_score:.5f}')<categorify> | def one_hot_encoder(df, nan_as_category=True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns | Home Credit Default Risk |
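A hedged toy example of one_hot_encoder above (the demo frame is invented; assumes pandas as pd and numpy as np are imported): it dummy-encodes every object column, optionally adds a NaN category, and returns the names of the added columns.
demo = pd.DataFrame({'grade': ['A', 'B', np.nan], 'amount': [1.0, 2.0, 3.0]})
demo, new_cols = one_hot_encoder(demo, nan_as_category=True)
print(new_cols)  # ['grade_A', 'grade_B', 'grade_nan']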
1,185,243 | test_transform = A.Compose([
A.Resize(IMSIZE, IMSIZE, p=1),
A.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
ToTensorV2(),
])
test_dataloader = get_dataloader(
submission_df[ID].apply(lambda x: os.path.join(PATH['test_image_dir'], x)),
submission_df[TARGET],
transform=test_transform,
with_memory_cache=True,
batch_size=params['test_batch_size'],
shuffle=False,
pin_memory=True,
)<predict_on_test> | num_rows = None
nan_as_category = True | Home Credit Default Risk |
1,185,243 | predictions = np.zeros((len(submission_df), 10))
for state_dict in best_state_dicts:
model = Model().to(DEVICE)
model.load_state_dict(state_dict)
model.eval()
_predictions = []
for x, _ in test_dataloader:
x = x.to(dtype=torch.float32, device=DEVICE)
with torch.no_grad():
y_pred = model(x)
_predictions.append(y_pred.cpu().numpy())
_predictions = np.concatenate(_predictions)
predictions += _predictions / len(best_state_dicts)<prepare_output> | print("Start Train Test.................") | Home Credit Default Risk |
1,185,243 | np.save('oof', oof)
np.save('predictions', predictions)
saved_best_state_dicts = {}
for i, bsd in enumerate(best_state_dicts):
saved_best_state_dicts[f'f{i}'] = bsd
torch.save(saved_best_state_dicts, 'best_state_dicts.pth')
submission_df[TARGET] = np.argmax(predictions, axis=1).tolist()<save_to_csv> | df = pd.read_csv('../input/application_train.csv', nrows=num_rows)
test_df = pd.read_csv('../input/application_test.csv', nrows=num_rows)
print("Train samples: {}, test samples: {}".format(len(df), len(test_df)))
df = pd.concat([df, test_df]).reset_index()  # DataFrame.append was removed in pandas 2.x; concat is the equivalent
del test_df
gc.collect() | Home Credit Default Risk |
1,185,243 | submission_df.to_csv('submission.csv', index=False)
FileLink('submission.csv' )<load_from_csv> | for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
df[bin_feature], uniques = pd.factorize(df[bin_feature])
df, cat_cols = one_hot_encoder(df, nan_as_category)
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True) | Home Credit Default Risk |
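365243 is the placeholder Home Credit uses for unknown day counts (roughly a thousand years of days), which is why DAYS_EMPLOYED is nulled before the ratio features are built. A hedged sanity check, assuming the column had no other missing values:
print(df['DAYS_EMPLOYED'].isna().mean())  # share of applicants that carried the sentinel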
1,185,243 | train_users_game1 = pd.read_csv('/kaggle/input/ds2019uec-task2/train_users_game1.csv')
train_users_game2 = pd.read_csv('/kaggle/input/ds2019uec-task2/train_users_game2.csv')
test_users_game1 = pd.read_csv('/kaggle/input/ds2019uec-task2/test_users_game1.csv')
test_user_ids = pd.read_csv('/kaggle/input/ds2019uec-task2/test_user_ids.csv')
game_group2 = pd.read_csv('/kaggle/input/ds2019uec-task2/game_group2.csv')
sample_submission = pd.read_csv('/kaggle/input/ds2019uec-task2/sample_submission.csv' )<count_duplicates> | df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']
df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL'] | Home Credit Default Risk |
1,185,243 | users_game1 = pd.concat([train_users_game1, test_users_game1]).drop(['play_hour'], axis=1).drop_duplicates()
users_game1['label'] = 1<concatenate> | a = df['DAYS_EMPLOYED_PERC'].tolist()
a = [x for x in a if str(x)!= 'nan']
b = df['INCOME_CREDIT_PERC'].tolist()
b = [x for x in b if str(x)!= 'nan']
c = df['INCOME_PER_PERSON'].tolist()
c = [x for x in c if str(x)!= 'nan']
d = df['ANNUITY_INCOME_PERC'].tolist()
d = [x for x in d if str(x)!= 'nan'] | Home Credit Default Risk |
1,185,243 | user_ids = pd.concat([train_users_game1['user_id'], train_users_game2['user_id'], test_user_ids['user_id']] ).unique()<drop_column> | print("End Train Test.................. " ) | Home Credit Default Risk |
1,185,243 | users_game1_matrix = users_game1.set_index(['user_id', 'game_title'])['label'].unstack().reindex(user_ids ).fillna(0 )<concatenate> | print("Start Bureau................ " ) | Home Credit Default Risk |
1,185,243 | train_user_ids = pd.concat([train_users_game1['user_id'], train_users_game2['user_id']] ).unique()
test_user_ids = test_user_ids['user_id'].values<compute_test_metric> | bureau = pd.read_csv('../input/bureau.csv', nrows=num_rows) | Home Credit Default Risk |
1,185,243 | users_similarity = 1 - cosine_distances(users_game1_matrix.loc[test_user_ids], users_game1_matrix.loc[train_user_ids])<remove_duplicates> | bb = pd.read_csv('../input/bureau_balance.csv', nrows=num_rows)
bb, bb_cat = one_hot_encoder(bb, nan_as_category)
bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category ) | Home Credit Default Risk |
1,185,243 | users_game2 = train_users_game2.drop(['play_hour', 'predict_game_id'], axis=1 ).drop_duplicates()
users_game2['label'] = 1<filter> | bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
for col in bb_cat:
bb_aggregations[col] = ['mean']
bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist()])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(columns='SK_ID_BUREAU', inplace=True)
del bb, bb_agg
gc.collect() | Home Credit Default Risk |
1,185,243 | test_users_game2_matrix =(users_similarity @ users_game2_matrix.loc[train_user_ids] )<data_type_conversions> | num_aggregations = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'CREDIT_DAY_OVERDUE': ['max', 'mean'],
'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'DAYS_CREDIT_UPDATE': ['min', 'max', 'mean'],
'AMT_ANNUITY': ['max', 'mean'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum']
} | Home Credit Default Risk |
1,185,243 | sample_submission['purchased_games'] = test_users_game2_matrix.apply(lambda x: ' '.join(x.sort_values(ascending=False)[:10].index.astype('str')) , axis=1 )<data_type_conversions> | cat_aggregations = {}
for cat in bureau_cat: cat_aggregations[cat] = ['mean']
for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist()]) | Home Credit Default Risk |
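The pd.Index(...) line is the idiom this kernel uses throughout: .agg() with a dict of lists yields a two-level column MultiIndex, which is flattened into PREFIX_COLUMN_STAT names. A hedged toy illustration (assumes pandas as pd; the frame is invented):
toy = pd.DataFrame({'SK_ID_CURR': [1, 1, 2], 'AMT_ANNUITY': [10.0, 30.0, 5.0]})
agg = toy.groupby('SK_ID_CURR').agg({'AMT_ANNUITY': ['max', 'mean']})
agg.columns = pd.Index(['BURO_' + e[0] + '_' + e[1].upper() for e in agg.columns.tolist()])
print(list(agg.columns))  # ['BURO_AMT_ANNUITY_MAX', 'BURO_AMT_ANNUITY_MEAN']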
1,185,243 | sample_submission.loc[test_users_game2_matrix.sum(axis=1)== 0, 'purchased_games'] = ' '.join(train_users_game2['predict_game_id'].value_counts(normalize=True ).index.astype('str')[:10] )<save_to_csv> | active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR' ).agg(num_aggregations)
active_agg.columns = pd.Index(['ACT_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(active_agg, how='left')
del active, active_agg
gc.collect()
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR' ).agg(num_aggregations)
closed_agg.columns = pd.Index(['CLS_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist() ])
bureau_agg = bureau_agg.join(closed_agg, how='left')
del closed, closed_agg, bureau
gc.collect() | Home Credit Default Risk |
1,185,243 | sample_submission.to_csv('submission.csv', index=None )<import_modules> | print("End Bureau................ " ) | Home Credit Default Risk |
1,185,243 | import pandas as pd
import numpy as np
from sklearn import tree
import graphviz
from sklearn.model_selection import cross_val_score<load_from_csv> | print("Start previous_application................ " ) | Home Credit Default Risk |
1,185,243 | df = pd.read_csv('/kaggle/input/predict-the-income-bi-hack/train.csv')
dft = pd.read_csv('/kaggle/input/predict-the-income-bi-hack/test.csv')
print('train dataset length', len(df), '\ntest dataset length', len(dft))<drop_column> | prev = pd.read_csv('../input/previous_application.csv', nrows=num_rows) | Home Credit Default Risk |
1,185,243 | ids=dft['ID']
df.drop('ID', axis=1,inplace =True)
dft.drop('ID', axis=1,inplace =True )<categorify> | prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True ) | Home Credit Default Risk |
1,185,243 | df = pd.get_dummies(df, columns=['Work', 'Education', 'Marital_Status', 'Occupation', 'Relationship', 'Race', 'Gender','Nationality'])
dft = pd.get_dummies(dft, columns=['Work', 'Education', 'Marital_Status', 'Occupation', 'Relationship', 'Race', 'Gender','Nationality'] )<drop_column> | prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT'] | Home Credit Default Risk |
1,185,243 | df.drop('Nationality_Holand-Netherlands',axis=1, inplace=True )<count_values> | num_aggregations = {
'AMT_ANNUITY': ['min', 'max', 'mean'],
'AMT_APPLICATION': ['min', 'max', 'mean'],
'AMT_CREDIT': ['min', 'max', 'mean'],
'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],
'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],
'AMT_GOODS_PRICE': ['min', 'max', 'mean'],
'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
} | Home Credit Default Risk |
1,185,243 | df['Income'].value_counts()<feature_engineering> | cat_aggregations = {}
for cat in cat_cols:
cat_aggregations[cat] = ['mean']
prev_agg = prev.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations})
prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist() ] ) | Home Credit Default Risk |
1,185,243 | df["Income"] = df["Income"].replace({'<=50K':1,'>50K':0} )<create_dataframe> | approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
approved_agg = approved.groupby('SK_ID_CURR' ).agg(num_aggregations)
approved_agg.columns = pd.Index(['APR_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist() ])
prev_agg = prev_agg.join(approved_agg, how='left')
refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
refused_agg = refused.groupby('SK_ID_CURR' ).agg(num_aggregations)
refused_agg.columns = pd.Index(['REF_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist() ])
prev_agg = prev_agg.join(refused_agg, how='left')
del refused, refused_agg, approved, approved_agg, prev
gc.collect()
| Home Credit Default Risk |
1,185,243 | d_train = df.copy()
d_test = dft.copy()
df1 = df<prepare_x_and_y> | print("End previous_application................ " ) | Home Credit Default Risk |
1,185,243 | X = d_train.drop('Income', axis=1)
y = d_train['Income']<import_modules> | print("Start POS_CASH_balance................ " ) | Home Credit Default Risk |
1,185,243 | from sklearn.model_selection import train_test_split<import_modules> | pos = pd.read_csv('.. /input/POS_CASH_balance.csv', nrows = num_rows ) | Home Credit Default Risk |
1,185,243 | from sklearn.model_selection import train_test_split<split> | pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)
aggregations = {
'MONTHS_BALANCE': ['max', 'mean', 'size'],
'SK_DPD': ['max', 'mean'],
'SK_DPD_DEF': ['max', 'mean']
} | Home Credit Default Risk |
1,185,243 | X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.30,random_state=42 )<choose_model_class> | for cat in cat_cols:
aggregations[cat] = ['mean']
pos_agg = pos.groupby('SK_ID_CURR' ).agg(aggregations)
pos_agg.columns = pd.Index(['POS_' + e[0] + "_" + e[1].upper() for e in pos_agg.columns.tolist() ])
pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR' ).size()
del pos
gc.collect() | Home Credit Default Risk |
1,185,243 | t = tree.DecisionTreeClassifier(criterion='entropy', max_depth=7 )<train_model> | print("Start POS_CASH_balance................ " ) | Home Credit Default Risk |
1,185,243 | t = t.fit(X_train, y_train)<compute_test_metric> | ins = pd.read_csv('../input/installments_payments.csv', nrows=num_rows)
ins, cat_cols = one_hot_encoder(ins, nan_as_category=True) | Home Credit Default Risk |
1,185,243 | t.score(X_test,y_test )<predict_on_test> | ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)
ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0) | Home Credit Default Risk |
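The two .apply(lambda ...) calls above just clamp negatives to zero; Series.clip is a vectorized equivalent (a hedged alternative with the same result, noticeably faster on the large installments table):
ins['DPD'] = ins['DPD'].clip(lower=0)
ins['DBD'] = ins['DBD'].clip(lower=0)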
1,185,243 | sol=t.predict(dft)
print(sol )<save_to_csv> | aggregations = {
'NUM_INSTALMENT_VERSION': ['nunique'],
'DPD': ['max', 'mean', 'sum'],
'DBD': ['max', 'mean', 'sum'],
'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],
'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],
'AMT_INSTALMENT': ['max', 'mean', 'sum'],
'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']
}
for cat in cat_cols:
aggregations[cat] = ['mean']
ins_agg = ins.groupby('SK_ID_CURR' ).agg(aggregations)
ins_agg.columns = pd.Index(['INS_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist() ])
ins_agg['INS_COUNT'] = ins.groupby('SK_ID_CURR' ).size()
del ins
gc.collect() | Home Credit Default Risk |
1,185,243 | with open('income-predicted.csv', 'w') as fw:
fw.write('ID,Income\n')
ct=24001
for i in sol:
s=""
if i==1:
s="<=50K"
else:
s=">50K"
fw.write(str(ct) + ',' + str(s) + '\n')
ct+=1<load_from_csv> | print("End POS_CASH_balance................ " ) | Home Credit Default Risk |
1,185,243 | sdf = pd.read_csv('income-predicted.csv', sep=',')
sdf.head()<count_values> | print("Start credit_card_balance................ " ) | Home Credit Default Risk |
1,185,243 | sdf.Income.value_counts()<load_from_csv> | cc = pd.read_csv('../input/credit_card_balance.csv', nrows=num_rows) | Home Credit Default Risk |
1,185,243 | %%capture
train = pd.read_csv('../input/fakenewsvortexbsb/train_df.csv', sep=';', error_bad_lines=False, quoting=3);<categorify> | cc, cat_cols = one_hot_encoder(cc, nan_as_category=True)
cc.drop(columns = ['SK_ID_PREV'], inplace = True)
cc_agg = cc.groupby('SK_ID_CURR' ).agg(['min', 'max', 'mean', 'sum', 'var'])
cc_agg.columns = pd.Index(['CC_' + e[0] + "_" + e[1].upper() for e in cc_agg.columns.tolist() ])
cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR' ).size()
del cc
gc.collect() | Home Credit Default Risk |
1,185,243 | example = train["manchete"][1]
print(unidecode(example))<categorify> | print("End credit_card_balance................ " ) | Home Credit Default Risk |
1,185,243 | letters_only=re.sub("[^a-zA-Z]"," ",unidecode(example))
print(letters_only )<string_transform> | with timer("Process bureau and bureau_balance"):
print("Bureau df shape:", bureau_agg.shape)
df = df.join(bureau_agg, how='left',on='SK_ID_CURR')
gc.collect()
with timer("Process previous_applications"):
print("Previous applications df shape:", prev_agg.shape)
df = df.join(prev_agg, how='left', on='SK_ID_CURR')
gc.collect()
with timer("Process POS-CASH balance"):
print("Pos-cash balance df shape:", pos_agg.shape)
df = df.join(pos_agg, how='left', on='SK_ID_CURR')
gc.collect()
with timer("Process installments payments"):
print("Installments payments df shape:", ins_agg.shape)
df = df.join(ins_agg, how='left', on='SK_ID_CURR')
gc.collect()
with timer("Process credit card balance"):
print("Credit card balance df shape:", cc_agg.shape)
df = df.join(cc_agg, how='left', on='SK_ID_CURR')
gc.collect()
del bureau_agg,prev_agg,pos_agg,ins_agg,cc_agg
gc.collect() | Home Credit Default Risk |
1,185,243 | lower_case=letters_only.lower()
words=lower_case.split()<string_transform> | print("Done................ ")
| Home Credit Default Risk |
1,185,243 | print(stopwords.words("portuguese"))<string_transform> | train_df = df[df['TARGET'].notnull()]
test_df = df[df['TARGET'].isnull()] | Home Credit Default Risk |
1,185,243 | stop = stopwords.words("portuguese")<categorify> | train_df = train_df.drop(['index'], axis=1)
test_df = test_df.drop(['index','TARGET'],axis=1)
train_df = train_df.fillna(0)
test_df = test_df.fillna(0 ) | Home Credit Default Risk |
1,185,243 | lista_stop = [unidecode(x) for x in stop]
print(lista_stop )<define_variables> | label = u'TARGET'
a = list(train_df.columns)
a.remove(label)
labels = train_df[label]
data_only = train_df[list(a)]
col_name = data_only.columns
X_train, X_test, y_train, y_test = train_test_split(data_only, labels, test_size=0.1,random_state = 42 ) | Home Credit Default Risk |
1,185,243 | words=[w for w in words if not w in lista_stop]
print(words )<string_transform> | clf_catboost = CatBoostClassifier(iterations=1200,
learning_rate=0.1,
depth=7,
l2_leaf_reg=40,
bootstrap_type='Bernoulli',
subsample=0.7,
scale_pos_weight=5,
eval_metric='AUC',
metric_period=50,
od_type='Iter',
od_wait=45,
random_seed=15,
allow_writing_files=False)
clf_catboost.fit(data_only,labels,verbose=True ) | Home Credit Default Risk |
1,185,243 | def review_to_words(raw_review):
raw_review = unidecode(raw_review)
raw_review = re.sub('^Jovem Pan', '', raw_review)  # lstrip('Jovem Pan') strips characters, not a prefix, and its result was discarded
letters_only=re.sub("[^a-zA-Z]"," ",raw_review)
words=letters_only.lower().split()
meaningful_words=[w for w in words if not w in lista_stop]
return(' '.join(meaningful_words))<string_transform> | pred = clf_catboost.predict_proba(test_df)
test_df['TARGET'] = pred[:, 1]  # predict_proba column 1 is P(TARGET=1); column 0 would invert the AUC | Home Credit Default Risk |
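A hedged usage sketch for review_to_words above (assumes the unidecode/re imports and lista_stop from the earlier cells; the headline is invented and the exact output depends on NLTK's Portuguese stopword list):
print(review_to_words('As ações subiram após o anúncio'))  # -> roughly 'acoes subiram anuncio'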
1,185,243 | <find_best_params><EOS> | test_df[['SK_ID_CURR', 'TARGET']].to_csv('submission_catboost1.csv', index= False ) | Home Credit Default Risk |
10,259,780 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<categorify> | color = sns.color_palette()
py.init_notebook_mode(connected=True)
init_notebook_mode(connected=True)
offline.init_notebook_mode()
cf.go_offline()
| Home Credit Default Risk |
10,259,780 | clean_train_review=[]
for i in range(0,num_reviews):
clean_train_review.append(review_to_words(train['manchete'][i]))<normalization> | application_train = pd.read_csv('/kaggle/input/home-credit-default-risk/application_train.csv')
POS_CASH_balance = pd.read_csv('/kaggle/input/home-credit-default-risk/POS_CASH_balance.csv')
bureau_balance = pd.read_csv('/kaggle/input/home-credit-default-risk/bureau_balance.csv')
previous_application = pd.read_csv('/kaggle/input/home-credit-default-risk/previous_application.csv')
installments_payments = pd.read_csv('/kaggle/input/home-credit-default-risk/installments_payments.csv')
credit_card_balance = pd.read_csv('/kaggle/input/home-credit-default-risk/credit_card_balance.csv')
bureau = pd.read_csv('/kaggle/input/home-credit-default-risk/bureau.csv')
application_test = pd.read_csv('/kaggle/input/home-credit-default-risk/application_test.csv' ) | Home Credit Default Risk |
10,259,780 | vectorizer=CountVectorizer(analyzer='word',tokenizer=None,preprocessor = None, stop_words = None,max_features = 7000)
train_data_features=vectorizer.fit_transform(clean_train_review)
train_data_features=train_data_features.toarray()<feature_engineering> | application_train.isnull().mean().sort_values(ascending = False ) | Home Credit Default Risk |
10,259,780 | vcab=vectorizer.get_feature_names()
<prepare_x_and_y> | POS_CASH_balance.isnull().mean().sort_values(ascending = False ) | Home Credit Default Risk |
10,259,780 | train_y = train["Class"]<split> | bureau_balance.isnull().mean().sort_values(ascending = False ) | Home Credit Default Risk |
10,259,780 | X_train, X_test, y_train, y_test = train_test_split(train_data_features, train_y, test_size=0.25, random_state=42 )<train_model> | previous_application.isnull().mean().sort_values(ascending = False ) | Home Credit Default Risk |
10,259,780 | model = KNeighborsClassifier(n_neighbors=3)
%time model = model.fit(X_train, y_train )<predict_on_test> | installments_payments.isnull().mean().sort_values(ascending = False ) | Home Credit Default Risk |
10,259,780 | result = model.predict(X_test )<compute_test_metric> | credit_card_balance.isnull().mean().sort_values(ascending = False ) | Home Credit Default Risk |
10,259,780 | accuracy_score(y_test, result )<compute_test_metric> | bureau.isnull().mean().sort_values(ascending = False ) | Home Credit Default Risk |
10,259,780 | print(classification_report(y_test, result))<choose_model_class> | for c in application_train.columns:
if (c != 'SK_ID_CURR') and (application_train[c].dtypes == object):
LE = LabelEncoder()
LE.fit(list(application_train[c].values.astype('str')) + list(application_test[c].values.astype('str')))
application_train[c] = LE.transform(list(application_train[c].values.astype('str')))
application_test[c] = LE.transform(list(application_test[c].values.astype('str')))
application_train.head() | Home Credit Default Risk |
10,259,780 | nb = MultinomialNB()
%time nb = nb.fit(X_train, y_train)
result = nb.predict(X_test )<compute_test_metric> | application_train.fillna(-1, inplace = True ) | Home Credit Default Risk |
10,259,780 | accuracy_score(y_test, result )<compute_test_metric> | X = application_train.drop(['SK_ID_CURR', 'TARGET'],axis=1)
Y = application_train.TARGET
xgb = XGBClassifier(n_estimators=500, max_depth=8, random_state=2018)
xgb.fit(X, Y ) | Home Credit Default Risk |
10,259,780 | print(classification_report(y_test, result))<train_model> | df = pd.concat([application_train, application_test]).reset_index()  # DataFrame.append was removed in pandas 2.x
df['DAYS_EMPLOYED'].replace(365243, -1, inplace= True)
df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']
df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']
previous_application['DAYS_FIRST_DRAWING'].replace(365243, -1, inplace= True)
previous_application['DAYS_FIRST_DUE'].replace(365243, -1, inplace= True)
previous_application['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, -1, inplace= True)
previous_application['DAYS_LAST_DUE'].replace(365243, -1, inplace= True)
previous_application['DAYS_TERMINATION'].replace(365243, -1, inplace= True)
previous_application['APP_CREDIT_PERC'] = previous_application['AMT_APPLICATION'] / previous_application['AMT_CREDIT']
installments_payments['PAYMENT_PERC'] = installments_payments['AMT_PAYMENT'] / installments_payments['AMT_INSTALMENT']
installments_payments['PAYMENT_DIFF'] = installments_payments['AMT_INSTALMENT'] - installments_payments['AMT_PAYMENT']
installments_payments['DPD'] = installments_payments['DAYS_ENTRY_PAYMENT'] - installments_payments['DAYS_INSTALMENT']
installments_payments['DBD'] = installments_payments['DAYS_INSTALMENT'] - installments_payments['DAYS_ENTRY_PAYMENT']
installments_payments['DPD'] = installments_payments['DPD'].apply(lambda x: x if x > 0 else 0)
installments_payments['DBD'] = installments_payments['DBD'].apply(lambda x: x if x > 0 else 0 ) | Home Credit Default Risk |
10,259,780 | clf2 = DecisionTreeClassifier(random_state=42)
%time clf2 = clf2.fit(X_train, y_train)
result = clf2.predict(X_test )<compute_test_metric> | bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
bb_agg = bureau_balance.groupby('SK_ID_BUREAU' ).agg(bb_aggregations)
bb_agg.columns = pd.Index([col[0] + "_" + col[1].upper() for col in bb_agg.columns.tolist() ])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(['SK_ID_BUREAU'], axis=1, inplace= True)
del bb_agg
gc.collect()
aggregations = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'DAYS_CREDIT_ENDDATE': ['mean','var'],
'DAYS_CREDIT_UPDATE': ['mean'],
'CREDIT_DAY_OVERDUE': ['mean','var'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'AMT_CREDIT_SUM': ['max', 'mean', 'var'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'var'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'var'],
'AMT_ANNUITY': ['max', 'mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'var']
}
bureau_agg = bureau.groupby('SK_ID_CURR' ).agg({**aggregations})
bureau_agg.columns = pd.Index(['BURO_' + col[0] + "_" + col[1].upper() for col in bureau_agg.columns.tolist() ])
del bureau, bureau_balance
gc.collect()
df = pd.merge(df, bureau_agg, how='left', on='SK_ID_CURR')
del bureau_agg
gc.collect() | Home Credit Default Risk |
10,259,780 | accuracy_score(y_test, result )<compute_test_metric> | for c in previous_application.columns:
if previous_application[c].dtypes==object:
LE = LabelEncoder()
previous_application[c] = LE.fit_transform(list(previous_application[c].values.astype('str')))
aggregations = {
'AMT_ANNUITY': ['min', 'max', 'mean'],
'AMT_APPLICATION': ['min', 'max', 'mean'],
'AMT_CREDIT': ['min', 'max', 'mean'],
'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],
'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],
'AMT_GOODS_PRICE': ['min', 'max', 'mean'],
'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'var'],
}
prev_agg = previous_application.groupby('SK_ID_CURR' ).agg({**aggregations})
prev_agg.columns = pd.Index(['PREV_' + col[0] + "_" + col[1].upper() for col in prev_agg.columns.tolist() ])
del previous_application
gc.collect()
df = pd.merge(df,prev_agg, how='left', on='SK_ID_CURR')
del prev_agg
gc.collect() | Home Credit Default Risk |
10,259,780 | print(classification_report(y_test, result))<predict_on_test> | for c in POS_CASH_balance.columns:
if POS_CASH_balance[c].dtypes==object:
LE = LabelEncoder()
POS_CASH_balance[c] = LE.fit_transform(list(POS_CASH_balance[c].values.astype('str')))
aggregations = {
'MONTHS_BALANCE': ['max', 'mean', 'size'],
'SK_DPD': ['max', 'mean', 'var'],
'SK_DPD_DEF': ['max', 'mean', 'var']
}
pos_agg = POS_CASH_balance.groupby('SK_ID_CURR' ).agg(aggregations)
pos_agg.columns = pd.Index(['POS_' + col[0] + "_" + col[1].upper() for col in pos_agg.columns.tolist() ])
pos_agg['POS_COUNT'] = POS_CASH_balance.groupby('SK_ID_CURR' ).size()
del POS_CASH_balance
gc.collect()
df = pd.merge(df,pos_agg, how='left', on='SK_ID_CURR')
del pos_agg
gc.collect() | Home Credit Default Risk |
10,259,780 | forest = RandomForestClassifier(random_state=42)
%time forest = forest.fit(X_train, y_train)
result = forest.predict(X_test )<compute_test_metric> | for c in installments_payments.columns:
if installments_payments[c].dtypes==object:
LE = LabelEncoder()
installments_payments[c] = LE.fit_transform(list(installments_payments[c].values.astype('str')))
aggregations = {
'NUM_INSTALMENT_VERSION': ['nunique'],
'DPD': ['max', 'mean', 'sum'],
'DBD': ['max', 'mean', 'sum'],
'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],
'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],
'AMT_INSTALMENT': ['max', 'mean', 'sum'],
'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']
}
ins_agg = installments_payments.groupby('SK_ID_CURR' ).agg(aggregations)
ins_agg.columns = pd.Index(['INSTAL_' + col[0] + "_" + col[1].upper() for col in ins_agg.columns.tolist() ])
ins_agg['INSTAL_COUNT'] = installments_payments.groupby('SK_ID_CURR' ).size()
del installments_payments
gc.collect()
df = pd.merge(df,ins_agg, how='left', on='SK_ID_CURR')
del ins_agg
gc.collect() | Home Credit Default Risk |
10,259,780 | accuracy_score(y_test, result )<compute_test_metric> | for c in credit_card_balance.columns:
if credit_card_balance[c].dtypes==object:
LE = LabelEncoder()
credit_card_balance[c] = LE.fit_transform(list(credit_card_balance[c].values.astype('str')))
credit_card_balance.drop(['SK_ID_PREV'], axis= 1, inplace = True)
cc_agg = credit_card_balance.groupby('SK_ID_CURR' ).agg(['min', 'max', 'mean', 'sum', 'var'])
cc_agg.columns = pd.Index(['CC_' + col[0] + "_" + col[1].upper() for col in cc_agg.columns.tolist() ])
cc_agg['CC_COUNT'] = credit_card_balance.groupby('SK_ID_CURR' ).size()
df = pd.merge(df,cc_agg, how='left', on='SK_ID_CURR')
del credit_card_balance, cc_agg
gc.collect() | Home Credit Default Risk |
10,259,780 | print(classification_report(y_test, result))<train_model> | df_train, df_test = df.iloc[:len(application_train)], df.iloc[len(application_train):]
del application_train, application_test, df
gc.collect() | Home Credit Default Risk |
10,259,780 | clf3 = GradientBoostingClassifier(random_state=42)
%time clf3 = clf3.fit(X_train, y_train)
result = clf3.predict(X_test )<compute_test_metric> | folds = StratifiedKFold(n_splits= 10, shuffle=True, random_state=2020)
sub_preds = np.zeros(df_test.shape[0])
feats = [f for f in df_train.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(df_train[feats], df_train['TARGET'])):
train_x, train_y = df_train[feats].iloc[train_idx], df_train['TARGET'].iloc[train_idx]
valid_x, valid_y = df_train[feats].iloc[valid_idx], df_train['TARGET'].iloc[valid_idx]
lgb = LGBMClassifier(nthread=4, n_estimators=12000, learning_rate=0.02, num_leaves=31,
colsample_bytree=0.85,subsample=0.9, max_depth=8, reg_alpha=0.0415, reg_lambda=0.073,
min_split_gain=0.022, min_child_weight=39.32, silent=-1, verbose=-1)
lgb.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)],
eval_metric= 'auc', verbose= 200, early_stopping_rounds= 100)
sub_preds += lgb.predict_proba(df_test[feats], num_iteration=lgb.best_iteration_)[:, 1] / folds.n_splits
del lgb, train_x, train_y, valid_x, valid_y
gc.collect()
df_test['TARGET'] = sub_preds | Home Credit Default Risk |
10,259,780 | <compute_test_metric><EOS> | df_test[['SK_ID_CURR', 'TARGET']].to_csv('submission.csv', index=False)  # Kaggle expects the .csv extension | Home Credit Default Risk |
11,198,944 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<predict_on_test> | pd.set_option('display.max_columns', None)
%matplotlib inline
bureau_balance = pd.read_csv('../input/bureau_balance.csv')
bureau_balance['STATUS_mod'] = bureau_balance.STATUS.map({'0':0, '1':1, '2':2, '3':3, '4':4, '5':5, 'X':np.nan, 'C':0}).map(lambda x: 0 if x=='C' else x).interpolate(method='linear')
bureau_balance['write_off'] = bureau_balance.STATUS.map(lambda x: 1 if x=='5' else 0)
bureau_balance['adj_score'] = (bureau_balance.MONTHS_BALANCE - bureau_balance.MONTHS_BALANCE.min() + 1) * bureau_balance.STATUS_mod
bb_month_count = bureau_balance.groupby('SK_ID_BUREAU')['MONTHS_BALANCE'].count()
bb_dpd_sum = bureau_balance.groupby('SK_ID_BUREAU')['STATUS_mod'].sum()
bb_write_off = bureau_balance.groupby('SK_ID_BUREAU')['write_off'].sum()
bb_dpd_sum_2_year = bureau_balance.loc[bureau_balance.MONTHS_BALANCE>=-24].groupby('SK_ID_BUREAU')['STATUS_mod'].sum()
bb_write_off_2_year = bureau_balance.loc[bureau_balance.MONTHS_BALANCE>=-24].groupby('SK_ID_BUREAU')['write_off'].sum()
bb_adj_score = bureau_balance.groupby('SK_ID_BUREAU')['adj_score'].sum()
bb_feature = pd.DataFrame({'bb_month_count':bb_month_count, 'bb_dpd_sum':bb_dpd_sum, 'bb_write_off':bb_write_off, 'bb_dpd_sum_2_year':bb_dpd_sum_2_year,
'bb_write_off_2_year':bb_write_off_2_year, 'bb_adj_score': bb_adj_score} ).reset_index().fillna(0)
del bb_month_count, bb_dpd_sum, bb_write_off, bb_dpd_sum_2_year, bb_write_off_2_year, bb_adj_score, bureau_balance
gc.collect()
bureau = pd.read_csv('../input/bureau.csv')
bureau = bureau.sort_values(['SK_ID_CURR', 'DAYS_CREDIT'])
bureau['ADJ_DAYS'] = (bureau.DAYS_CREDIT - bureau.DAYS_CREDIT.min()) / (bureau.DAYS_CREDIT.max() - bureau.DAYS_CREDIT.min()) + 0.5
bur_ncount = bureau.groupby('SK_ID_CURR')['SK_ID_BUREAU'].count()
bur_act_count = bureau.loc[bureau.CREDIT_ACTIVE=='Active'].groupby('SK_ID_CURR')['SK_ID_BUREAU'].count()
bur_bad_count = bureau.loc[bureau.CREDIT_ACTIVE=='Bad debt'].groupby('SK_ID_CURR')['SK_ID_BUREAU'].count()
bur_sold_count = bureau.loc[bureau.CREDIT_ACTIVE=='Sold out'].groupby('SK_ID_CURR')['SK_ID_BUREAU'].count()
bur_recent_application = -bureau.groupby('SK_ID_CURR')['DAYS_CREDIT'].max()
bur_eariliest_application = -bureau.groupby('SK_ID_CURR')['DAYS_CREDIT'].min()
bur_max_enddate = -bureau.groupby('SK_ID_CURR')['DAYS_CREDIT_ENDDATE'].max()
bureau['application_interval'] = bureau.groupby('SK_ID_CURR')['DAYS_CREDIT'].diff(-1)
missing_iter = iter(bureau.groupby('SK_ID_CURR')['DAYS_CREDIT'].max())
# diff(-1) leaves NaN on each client's most recent record; fill it with the time
# since that latest application (this relies on the SK_ID_CURR sort order above)
bureau.application_interval = bureau.application_interval.map(lambda x: -next(missing_iter) if np.isnan(x) else -x)
bur_avg_intervel = bureau.groupby('SK_ID_CURR')['application_interval'].mean()
bur_sd_intervel = bureau.groupby('SK_ID_CURR')['application_interval'].agg('std' ).fillna(0)
bur_max_overdue_days = bureau.groupby('SK_ID_CURR')['CREDIT_DAY_OVERDUE'].max()
bur_active_total_overdue_days = bureau.loc[bureau.CREDIT_ACTIVE=='Active'].groupby('SK_ID_CURR')['CREDIT_DAY_OVERDUE'].sum()
bur_active_max_overdue_days = bureau.loc[bureau.CREDIT_ACTIVE=='Active'].groupby('SK_ID_CURR')['CREDIT_DAY_OVERDUE'].max()
bureau['DAYS_CREDIT_ISPAST'] = bureau.DAYS_CREDIT_ENDDATE.map(lambda x: 1 if x < 0 else 0)
bur_avg_remaining_days = bureau.loc[bureau.DAYS_CREDIT_ENDDATE>0].groupby('SK_ID_CURR')['DAYS_CREDIT_ENDDATE'].mean()
bureau['ADJ_AMT_CREDIT_MAX_OVERDUE'] = bureau.ADJ_DAYS * bureau.AMT_CREDIT_MAX_OVERDUE
bur_total_max_overdue_adj = bureau.groupby('SK_ID_CURR')['ADJ_AMT_CREDIT_MAX_OVERDUE'].sum()
bur_avg_max_overdue = bureau.groupby('SK_ID_CURR')['AMT_CREDIT_MAX_OVERDUE'].mean()
bur_overall_max_overdue = bureau.groupby('SK_ID_CURR')['AMT_CREDIT_MAX_OVERDUE'].max()
bureau['ADJ_CNT_CREDIT_PROLONG'] = bureau.ADJ_DAYS * bureau.CNT_CREDIT_PROLONG
bur_avg_prelonged = bureau.groupby('SK_ID_CURR')['ADJ_CNT_CREDIT_PROLONG'].mean().fillna(0)
bur_max_prelonged = bureau.groupby('SK_ID_CURR')['CNT_CREDIT_PROLONG'].max().fillna(0)
bur_total_prelonged_adj = bureau.groupby('SK_ID_CURR')['ADJ_CNT_CREDIT_PROLONG'].sum().fillna(0)
bureau['ADJ_AMT_CREDIT_SUM_DEBT'] = bureau.ADJ_DAYS * bureau.AMT_CREDIT_SUM
bur_total_amount_adj = bureau.groupby('SK_ID_CURR')['ADJ_AMT_CREDIT_SUM_DEBT'].sum()
bur_avg_amount = bureau.groupby('SK_ID_CURR')['AMT_CREDIT_SUM'].mean()
bur_active_total_amount = bureau.loc[bureau.CREDIT_ACTIVE=='Active'].groupby('SK_ID_CURR')['AMT_CREDIT_SUM'].sum()
bur_active_avg_amount = bureau.loc[bureau.CREDIT_ACTIVE=='Active'].groupby('SK_ID_CURR')['AMT_CREDIT_SUM'].mean()
bur_active_total_debt = bureau.loc[bureau.CREDIT_ACTIVE=='Active'].groupby('SK_ID_CURR')['AMT_CREDIT_SUM_DEBT'].sum()
bur_active_avg_debt = bureau.loc[bureau.CREDIT_ACTIVE=='Active'].groupby('SK_ID_CURR')['AMT_CREDIT_SUM_DEBT'].mean()
bur_active_total_limit = bureau.loc[bureau.CREDIT_ACTIVE=='Active'].groupby('SK_ID_CURR')['AMT_CREDIT_SUM_LIMIT'].sum()
bur_active_avg_limit = bureau.loc[bureau.CREDIT_ACTIVE=='Active'].groupby('SK_ID_CURR')['AMT_CREDIT_SUM_LIMIT'].mean()
bur_active_total_overdue = bureau.loc[bureau.CREDIT_ACTIVE=='Active'].groupby('SK_ID_CURR')['AMT_CREDIT_SUM_OVERDUE'].sum()
bur_active_avg_overdue = bureau.loc[bureau.CREDIT_ACTIVE=='Active'].groupby('SK_ID_CURR')['AMT_CREDIT_SUM_OVERDUE'].mean()
bur_active_ratio_debt_credit = bur_active_total_debt / bur_active_total_amount.map(lambda x: x + 0.1)
bur_active_ratio_overdue_debt = bur_active_total_overdue / bur_active_total_debt.map(lambda x: x + 0.1)
bur_avg_update = bureau.groupby('SK_ID_CURR')['DAYS_CREDIT_UPDATE'].mean()
bur_recent_update = bureau.groupby('SK_ID_CURR')['DAYS_CREDIT_UPDATE'].max()
bur_avg_annuity = bureau.groupby('SK_ID_CURR')['AMT_ANNUITY'].mean()
bur_total_annuity = bureau.groupby('SK_ID_CURR')['AMT_ANNUITY'].sum()
bur_active_total_annuity = bureau.loc[bureau.CREDIT_ACTIVE=='Active'].groupby('SK_ID_CURR')['AMT_ANNUITY'].sum()
bureau['term'] = bureau.AMT_CREDIT_SUM / bureau.AMT_ANNUITY
bur_avg_term = bureau.loc[bureau.term < float('inf')].groupby('SK_ID_CURR')['term'].mean()
bureau_num_feature = pd.DataFrame({'bur_ncount':bur_ncount, 'bur_act_count':bur_act_count, 'bur_bad_count':bur_bad_count, 'bur_sold_count':bur_sold_count,
'bur_recent_application':bur_recent_application,'bur_eariliest_application':bur_eariliest_application, 'bur_max_enddate':bur_max_enddate,
'bur_avg_intervel':bur_avg_intervel, 'bur_sd_intervel':bur_sd_intervel,
'bur_max_overdue_days':bur_max_overdue_days, 'bur_active_total_overdue_days':bur_active_total_overdue_days, 'bur_active_max_overdue_days':bur_active_max_overdue_days,
'bur_avg_remaining_days':bur_avg_remaining_days, 'bur_total_max_overdue_adj':bur_total_max_overdue_adj, 'bur_avg_max_overdue':bur_avg_max_overdue, 'bur_overall_max_overdue':bur_overall_max_overdue,
'bur_avg_prelonged':bur_avg_prelonged, 'bur_max_prelonged':bur_max_prelonged, 'bur_total_prelonged_adj':bur_total_prelonged_adj,
'bur_total_amount_adj':bur_total_amount_adj, 'bur_avg_amount':bur_avg_amount,
'bur_active_total_amount':bur_active_total_amount, 'bur_active_avg_amount':bur_active_avg_amount, 'bur_active_total_debt':bur_active_total_debt, 'bur_active_avg_debt':bur_active_avg_debt,
'bur_active_total_limit':bur_active_total_limit, 'bur_active_avg_limit':bur_active_avg_limit, 'bur_active_total_overdue':bur_active_total_overdue, 'bur_active_avg_overdue':bur_active_avg_overdue,
'bur_active_ratio_debt_credit':bur_active_ratio_debt_credit, 'bur_active_ratio_overdue_debt':bur_active_ratio_overdue_debt,
'bur_avg_update':bur_avg_update, 'bur_recent_update':bur_recent_update,
'bur_avg_annuity':bur_avg_annuity, 'bur_total_annuity':bur_total_annuity, 'bur_active_total_annuity':bur_active_total_annuity, 'bur_avg_term':bur_avg_term} ).reset_index()
fill0_list = ['bur_act_count', 'bur_bad_count', 'bur_sold_count', 'bur_active_total_overdue_days', 'bur_active_max_overdue_days', 'bur_active_total_amount', 'bur_active_avg_amount',
'bur_active_total_debt', 'bur_active_avg_debt', 'bur_active_total_limit', 'bur_active_avg_limit', 'bur_active_total_overdue', 'bur_active_avg_overdue',
'bur_active_ratio_debt_credit', 'bur_active_ratio_overdue_debt', 'bur_active_total_annuity']
bureau_num_feature[fill0_list] = bureau_num_feature[fill0_list].fillna(0)
bureau_cat = pd.get_dummies(bureau[['SK_ID_CURR','CREDIT_ACTIVE', 'CREDIT_CURRENCY', 'CREDIT_TYPE']], prefix='bur')
bureau_cat_feature = bureau_cat.groupby('SK_ID_CURR' ).mean().reset_index()
del bureau_cat
gc.collect()
bureau_bb = bureau[['SK_ID_CURR','SK_ID_BUREAU']].merge(bb_feature, on='SK_ID_BUREAU', how='left')
bb_avg_month = bureau_bb.groupby('SK_ID_CURR')['bb_month_count'].mean()
bb_total_overdue_month = bureau_bb.groupby('SK_ID_CURR')['bb_dpd_sum'].sum()
bb_total_writeoff = bureau_bb.groupby('SK_ID_CURR')['bb_write_off'].sum()
bb_max_overdue_month = bureau_bb.groupby('SK_ID_CURR')['bb_dpd_sum'].max()
bb_max_writeoff = bureau_bb.groupby('SK_ID_CURR')['bb_write_off'].max()
bb_total_overdue_month_2year = bureau_bb.groupby('SK_ID_CURR')['bb_dpd_sum_2_year'].sum()
bb_max_overdue_month_2year= bureau_bb.groupby('SK_ID_CURR')['bb_dpd_sum_2_year'].max()
bb_total_writeoff_2year = bureau_bb.groupby('SK_ID_CURR')['bb_write_off_2_year'].sum()
bb_max_writeoff_2year = bureau_bb.groupby('SK_ID_CURR')['bb_write_off_2_year'].max()
bb_max_score = bureau_bb.groupby('SK_ID_CURR')['bb_adj_score'].max()
bb_total_score = bureau_bb.groupby('SK_ID_CURR')['bb_adj_score'].sum()
bb_avg_score = bureau_bb.groupby('SK_ID_CURR')['bb_adj_score'].mean()
bureau_bb_feature = pd.DataFrame({'bb_avg_month':bb_avg_month,
'bb_total_overdue_month':bb_total_overdue_month, 'bb_total_writeoff':bb_total_writeoff, 'bur_sold_count':bb_max_overdue_month,'bb_max_writeoff':bb_max_writeoff,
'bb_total_overdue_month_2year':bb_total_overdue_month_2year, 'bb_total_writeoff_2year':bb_total_writeoff_2year,
'bb_max_overdue_month_2year':bb_max_overdue_month_2year,'bb_max_writeoff_2year':bb_max_writeoff_2year,
'bb_max_score':bb_max_score, 'bb_total_score':bb_total_score, 'bb_avg_score':bb_avg_score} ).reset_index()
bureau_feature = bureau_num_feature.merge(bureau_cat_feature, on='SK_ID_CURR' ).merge(bureau_bb_feature, on='SK_ID_CURR', how='left')
print(bureau_feature.shape)
bureau_feature.to_csv('bureau_feature.csv', index=False ) | Home Credit Default Risk |
11,198,944 | clf = LogisticRegression(random_state=42, solver='lbfgs')
%time clf = clf.fit(X_train, y_train)
result = clf.predict(X_test )<compute_test_metric> | pd.set_option('display.max_columns', None)
%matplotlib inline
warnings.filterwarnings("ignore")
application_train = pd.read_csv('../input/application_train.csv')
credit_card_balance = pd.read_csv('../input/credit_card_balance.csv')
credit_card_balance = credit_card_balance.sort_values(['SK_ID_CURR', 'SK_ID_PREV', 'MONTHS_BALANCE'])
ccb_prev_count = credit_card_balance.groupby('SK_ID_CURR')['SK_ID_PREV'].nunique()
credit_card_balance['PAY_MONTH'] = credit_card_balance.CNT_INSTALMENT_MATURE_CUM.map(lambda x: 1 if x > 0 else 0)
ccb_temp = credit_card_balance.groupby(['SK_ID_CURR','SK_ID_PREV'])['PAY_MONTH'].sum().reset_index()
ccb_avg_inst_card = ccb_temp.groupby('SK_ID_CURR')['PAY_MONTH'].mean()
ccb_total_inst_card = ccb_temp.groupby('SK_ID_CURR')['PAY_MONTH'].sum()
ccb_temp = credit_card_balance.groupby(['SK_ID_CURR','SK_ID_PREV'])['AMT_CREDIT_LIMIT_ACTUAL'].mean().reset_index()
ccb_avg_limit_card = ccb_temp.groupby('SK_ID_CURR')['AMT_CREDIT_LIMIT_ACTUAL'].mean()
ccb_max_limit_card = credit_card_balance.groupby('SK_ID_CURR')['AMT_CREDIT_LIMIT_ACTUAL'].max()
ccb_total_limit_card = ccb_temp.groupby('SK_ID_CURR')['AMT_CREDIT_LIMIT_ACTUAL'].sum()
ccb_temp = credit_card_balance.loc[credit_card_balance.CNT_DRAWINGS_CURRENT>0].groupby(['SK_ID_CURR','MONTHS_BALANCE'])[['AMT_DRAWINGS_CURRENT', 'CNT_DRAWINGS_CURRENT']].sum().reset_index()  # selecting two columns needs a list; tuple indexing was removed in pandas 2.x
ccb_temp['avg_drawing_amount'] = (ccb_temp.AMT_DRAWINGS_CURRENT / ccb_temp.CNT_DRAWINGS_CURRENT).fillna(0)
ccb_avg_drawing_amount = ccb_temp.groupby('SK_ID_CURR')['avg_drawing_amount'].mean().fillna(0)
ccb_count_rej = credit_card_balance.groupby(['SK_ID_CURR'])['NAME_CONTRACT_STATUS'].agg(lambda x: np.sum(x=='Refused'))
last_month_credit = credit_card_balance.groupby(['SK_ID_CURR','SK_ID_PREV'])['MONTHS_BALANCE'].max().reset_index()
last_month_credit = last_month_credit.merge(credit_card_balance, on=['SK_ID_CURR','SK_ID_PREV', 'MONTHS_BALANCE'])
ccb_cur_total_receivable = last_month_credit.groupby('SK_ID_CURR')['AMT_TOTAL_RECEIVABLE'].sum()
ccb_cur_total_limit = last_month_credit.loc[last_month_credit.NAME_CONTRACT_STATUS == 'Active'].groupby('SK_ID_CURR')['AMT_CREDIT_LIMIT_ACTUAL'].sum()
ccb_cur_total_payment = last_month_credit.groupby('SK_ID_CURR')['AMT_INST_MIN_REGULARITY'].sum()
ccb_cur_total_balance = last_month_credit.groupby('SK_ID_CURR')['AMT_BALANCE'].sum()
ccb_temp = credit_card_balance.loc[credit_card_balance.MONTHS_BALANCE>=-12]
ccb_drawing_amount_1y = ccb_temp.groupby('SK_ID_CURR')['AMT_DRAWINGS_CURRENT'].sum()
ccb_drawing_times_1y = ccb_temp.groupby('SK_ID_CURR')['CNT_DRAWINGS_CURRENT'].sum()
ccb_temp = credit_card_balance.loc[credit_card_balance.MONTHS_BALANCE>=-6]
ccb_drawing_amount_6m = ccb_temp.groupby('SK_ID_CURR')['AMT_DRAWINGS_CURRENT'].sum()
ccb_drawing_times_6m = ccb_temp.groupby('SK_ID_CURR')['CNT_DRAWINGS_CURRENT'].sum()
ccb_temp = credit_card_balance[['SK_ID_CURR', 'SK_ID_PREV', 'SK_DPD', 'SK_DPD_DEF']].groupby(['SK_ID_CURR','SK_ID_PREV'])[['SK_DPD','SK_DPD_DEF']].max().reset_index()
ccb_max_dpd_days = ccb_temp.groupby('SK_ID_CURR')['SK_DPD'].max()
ccb_total_dpd_days = ccb_temp.groupby('SK_ID_CURR')['SK_DPD'].sum()
ccb_max_largedpd_days = ccb_temp.groupby('SK_ID_CURR')['SK_DPD_DEF'].max()
ccb_total_largedpd_days = ccb_temp.groupby('SK_ID_CURR')['SK_DPD_DEF'].sum()
ccb_feature = pd.DataFrame({'ccb_prev_count':ccb_prev_count, 'ccb_avg_inst_card':ccb_avg_inst_card, 'ccb_avg_limit_card':ccb_avg_limit_card, 'ccb_total_inst_card':ccb_total_inst_card, 'ccb_count_rej': ccb_count_rej,
'ccb_avg_limit_card':ccb_avg_limit_card, 'ccb_max_limit_card':ccb_max_limit_card, 'ccb_total_limit_card':ccb_total_limit_card, 'ccb_avg_drawing_amount':ccb_avg_drawing_amount,
'ccb_cur_total_receivable':ccb_cur_total_receivable, 'ccb_cur_total_limit':ccb_cur_total_limit, 'ccb_cur_total_payment':ccb_cur_total_payment, 'ccb_cur_total_balance':ccb_cur_total_balance,
'ccb_drawing_amount_1y':ccb_drawing_amount_1y, 'ccb_drawing_times_1y':ccb_drawing_times_1y, 'ccb_drawing_amount_6m':ccb_drawing_amount_6m, 'ccb_drawing_times_6m':ccb_drawing_times_6m,
'ccb_max_dpd_days':ccb_max_dpd_days, 'ccb_total_dpd_days':ccb_total_dpd_days, 'ccb_max_largedpd_days':ccb_max_largedpd_days, 'ccb_total_largedpd_days':ccb_total_largedpd_days} ).reset_index()
ccb_feature.to_csv('ccb_feature.csv', index=False ) | Home Credit Default Risk |
11,198,944 | accuracy_score(y_test, result )<compute_test_metric> | pd.set_option('display.max_columns', None)
%matplotlib inline
warnings.filterwarnings("ignore")
installments_payments = pd.read_csv('../input/installments_payments.csv')
installments_payments = installments_payments.sort_values(['SK_ID_CURR', 'SK_ID_PREV', 'NUM_INSTALMENT_NUMBER'])
previous_application = pd.read_csv("../input/previous_application.csv")
previous_application = previous_application.sort_values(['SK_ID_CURR', 'DAYS_DECISION'])
recent_record = installments_payments.merge(installments_payments.groupby('SK_ID_CURR')['SK_ID_PREV'].max().reset_index().drop('SK_ID_CURR', axis=1), on='SK_ID_PREV')
ip_recent_term = recent_record.groupby('SK_ID_CURR')['NUM_INSTALMENT_NUMBER'].max()
ip_recent_total_actual_payment = recent_record.groupby('SK_ID_CURR')['AMT_PAYMENT'].sum()
ip_recent_total_required_payment = recent_record.groupby('SK_ID_CURR')['AMT_INSTALMENT'].sum()
recent_record['is_late'] = (recent_record.DAYS_INSTALMENT < recent_record.DAYS_ENTRY_PAYMENT).map(lambda x: 1 if x == True else 0).fillna(0)
recent_record['is_less'] = (recent_record.AMT_INSTALMENT > recent_record.AMT_PAYMENT).map(lambda x: 1 if x == True else 0).fillna(0)
ip_recent_total_late_times = recent_record.groupby('SK_ID_CURR')['is_late'].sum()
ip_recent_total_less_times = recent_record.groupby('SK_ID_CURR')['is_less'].sum()
# .copy() so the column assignments below don't trigger SettingWithCopyWarning
ip_temp1 = recent_record.loc[recent_record.is_late==1].copy()
ip_temp2 = recent_record.loc[recent_record.is_less==1].copy()
ip_temp1['total_late'] = ip_temp1.DAYS_ENTRY_PAYMENT - ip_temp1.DAYS_INSTALMENT
ip_temp2['total_less'] = ip_temp2.AMT_INSTALMENT - ip_temp2.AMT_PAYMENT
ip_recent_total_late_days = ip_temp1.groupby('SK_ID_CURR')['total_late'].sum()
ip_recent_total_less_amount = ip_temp2.groupby('SK_ID_CURR')['total_less'].sum()
del ip_temp1, ip_temp2
gc.collect()
ip_prev_count = installments_payments.groupby('SK_ID_CURR')['SK_ID_PREV'].nunique()
ip_payment_count = installments_payments.groupby('SK_ID_CURR')['SK_ID_PREV'].count()
installments_payments['IS_CREDIT'] = installments_payments.NUM_INSTALMENT_VERSION.map(lambda x: 1 if x==0 else 0)
ip_creditcard_user = installments_payments.groupby('SK_ID_CURR')['IS_CREDIT'].sum().map(lambda x: 1 if x>0 else 0)
ip_creditcard_count = installments_payments.groupby(['SK_ID_CURR','SK_ID_PREV'])['IS_CREDIT'].sum().map(lambda x: 1 if x>0 else 0 ).reset_index().groupby('SK_ID_CURR')['IS_CREDIT'].sum()
ip_temp = (installments_payments.groupby(['SK_ID_CURR','SK_ID_PREV'])['NUM_INSTALMENT_VERSION'].nunique() - 1).reset_index()
ip_total_change_times = ip_temp.groupby('SK_ID_CURR')['NUM_INSTALMENT_VERSION'].sum()
ip_avg_change_times = ip_temp.groupby('SK_ID_CURR')['NUM_INSTALMENT_VERSION'].mean()
ip_temp = installments_payments.groupby(['SK_ID_CURR', 'SK_ID_PREV'])['NUM_INSTALMENT_NUMBER'].max().reset_index()
ip_avg_instl = ip_temp.groupby('SK_ID_CURR')['NUM_INSTALMENT_NUMBER'].mean()
ip_max_instl = ip_temp.groupby('SK_ID_CURR')['NUM_INSTALMENT_NUMBER'].max()
installments_payments['is_late'] = (installments_payments.DAYS_INSTALMENT < installments_payments.DAYS_ENTRY_PAYMENT).map(lambda x: 1 if x == True else 0).fillna(0)
installments_payments['is_less'] = (installments_payments.AMT_INSTALMENT > installments_payments.AMT_PAYMENT).map(lambda x: 1 if x == True else 0).fillna(0)
ip_total_late_times = installments_payments.groupby('SK_ID_CURR')['is_late'].sum()
ip_total_less_times = installments_payments.groupby('SK_ID_CURR')['is_less'].sum()
ip_temp1 = installments_payments.loc[installments_payments.is_late==1].copy()
ip_temp2 = installments_payments.loc[installments_payments.is_less==1].copy()
ip_temp1['total_late'] = ip_temp1.DAYS_ENTRY_PAYMENT - ip_temp1.DAYS_INSTALMENT
ip_temp2['total_less'] = ip_temp2.AMT_INSTALMENT - ip_temp2.AMT_PAYMENT
ip_total_late_days = ip_temp1.groupby('SK_ID_CURR')['total_late'].sum()
ip_total_less_amount = ip_temp2.groupby('SK_ID_CURR')['total_less'].sum()
del ip_temp1, ip_temp2
gc.collect()
ip_total_actual_payment = installments_payments.groupby('SK_ID_CURR')['AMT_PAYMENT'].sum()
ip_total_required_payment = installments_payments.groupby('SK_ID_CURR')['AMT_INSTALMENT'].sum()
ip_1y = installments_payments.loc[installments_payments.DAYS_ENTRY_PAYMENT>-365]
ip_payment_count_1y = ip_1y.groupby('SK_ID_CURR')['SK_ID_PREV'].count()
ip_creditcard_count_1y = ip_1y.groupby(['SK_ID_CURR','SK_ID_PREV'])['IS_CREDIT'].sum().map(lambda x: 1 if x>0 else 0).reset_index().groupby('SK_ID_CURR')['IS_CREDIT'].sum()
ip_total_late_times_1y = ip_1y.groupby('SK_ID_CURR')['is_late'].sum()
ip_total_less_times_1y = ip_1y.groupby('SK_ID_CURR')['is_less'].sum()
ip_temp = ip_1y.groupby(['SK_ID_CURR', 'SK_ID_PREV'])['NUM_INSTALMENT_NUMBER'].max().reset_index()
ip_avg_instl_1y = ip_temp.groupby('SK_ID_CURR')['NUM_INSTALMENT_NUMBER'].mean()
ip_temp1 = ip_1y.loc[ip_1y.is_late==1].copy()
ip_temp2 = ip_1y.loc[ip_1y.is_less==1].copy()
ip_temp1['total_late'] = ip_temp1.DAYS_ENTRY_PAYMENT - ip_temp1.DAYS_INSTALMENT
ip_temp2['total_less'] = ip_temp2.AMT_INSTALMENT - ip_temp2.AMT_PAYMENT
ip_total_late_days_1y = ip_temp1.groupby('SK_ID_CURR')['total_late'].sum()
ip_total_less_amount_1y = ip_temp2.groupby('SK_ID_CURR')['total_less'].sum()
del ip_temp1, ip_temp2
gc.collect()
ip_total_actual_payment_1y = ip_1y.groupby('SK_ID_CURR')['AMT_PAYMENT'].sum()
ip_total_required_payment_1y = ip_1y.groupby('SK_ID_CURR')['AMT_INSTALMENT'].sum()
ip_6m = installments_payments.loc[installments_payments.DAYS_ENTRY_PAYMENT>-180]
ip_payment_count_6m = ip_6m.groupby('SK_ID_CURR')['SK_ID_PREV'].count()
ip_creditcard_count_6m = ip_6m.groupby(['SK_ID_CURR','SK_ID_PREV'])['IS_CREDIT'].sum().map(lambda x: 1 if x>0 else 0).reset_index().groupby('SK_ID_CURR')['IS_CREDIT'].sum()
ip_total_late_times_6m = ip_6m.groupby('SK_ID_CURR')['is_late'].sum()
ip_total_less_times_6m = ip_6m.groupby('SK_ID_CURR')['is_less'].sum()
ip_temp = ip_6m.groupby(['SK_ID_CURR', 'SK_ID_PREV'])['NUM_INSTALMENT_NUMBER'].max().reset_index()
ip_avg_instl_6m = ip_temp.groupby('SK_ID_CURR')['NUM_INSTALMENT_NUMBER'].mean()
ip_temp1 = ip_6m.loc[ip_6m.is_late==1].copy()
ip_temp2 = ip_6m.loc[ip_6m.is_less==1].copy()
ip_temp1['total_late'] = ip_temp1.DAYS_ENTRY_PAYMENT - ip_temp1.DAYS_INSTALMENT
ip_temp2['total_less'] = ip_temp2.AMT_INSTALMENT - ip_temp2.AMT_PAYMENT
ip_total_late_days_6m = ip_temp1.groupby('SK_ID_CURR')['total_late'].sum()
ip_total_less_amount_6m = ip_temp2.groupby('SK_ID_CURR')['total_less'].sum()
del ip_temp1, ip_temp2
gc.collect()
ip_total_actual_payment_6m = ip_6m.groupby('SK_ID_CURR')['AMT_PAYMENT'].sum()
ip_total_required_payment_6m = ip_6m.groupby('SK_ID_CURR')['AMT_INSTALMENT'].sum()
ip_feature = pd.DataFrame({'ip_prev_count':ip_prev_count, 'ip_payment_count':ip_payment_count, 'ip_creditcard_user':ip_creditcard_user, 'ip_creditcard_count':ip_creditcard_count,
                           'ip_total_change_times':ip_total_change_times, 'ip_avg_change_times':ip_avg_change_times, 'ip_avg_instl':ip_avg_instl, 'ip_max_instl':ip_max_instl,
                           'ip_total_late_times':ip_total_late_times, 'ip_total_less_times':ip_total_less_times, 'ip_total_late_days':ip_total_late_days, 'ip_total_less_amount':ip_total_less_amount,
                           'ip_total_actual_payment':ip_total_actual_payment, 'ip_total_required_payment':ip_total_required_payment,
                           'ip_recent_term':ip_recent_term, 'ip_recent_total_actual_payment':ip_recent_total_actual_payment, 'ip_recent_total_required_payment':ip_recent_total_required_payment,
                           'ip_recent_total_late_times':ip_recent_total_late_times, 'ip_recent_total_less_times':ip_recent_total_less_times,
                           'ip_recent_total_late_days':ip_recent_total_late_days, 'ip_recent_total_less_amount':ip_recent_total_less_amount}).reset_index().fillna(0)
ip_1y = pd.DataFrame({'ip_payment_count_1y':ip_payment_count_1y, 'ip_creditcard_count_1y': ip_creditcard_count_1y, 'ip_avg_instl_1y':ip_avg_instl_1y,
                      'ip_total_late_times_1y':ip_total_late_times_1y, 'ip_total_less_times_1y':ip_total_less_times_1y, 'ip_total_late_days_1y':ip_total_late_days_1y,
                      'ip_total_less_amount_1y':ip_total_less_amount_1y, 'ip_total_actual_payment_1y':ip_total_actual_payment_1y, 'ip_total_required_payment_1y':ip_total_required_payment_1y}).reset_index().fillna(0)
ip_6m = pd.DataFrame({'ip_payment_count_6m':ip_payment_count_6m, 'ip_creditcard_count_6m': ip_creditcard_count_6m, 'ip_avg_instl_6m':ip_avg_instl_6m,
                      'ip_total_late_times_6m':ip_total_late_times_6m, 'ip_total_less_times_6m':ip_total_less_times_6m, 'ip_total_late_days_6m':ip_total_late_days_6m,
                      'ip_total_less_amount_6m':ip_total_less_amount_6m, 'ip_total_actual_payment_6m':ip_total_actual_payment_6m, 'ip_total_required_payment_6m':ip_total_required_payment_6m}).reset_index().fillna(0)
ip_feature = ip_feature.merge(ip_1y, on='SK_ID_CURR', how='left').merge(ip_6m, on='SK_ID_CURR', how='left')
ip_feature['ip_active_1y'] = ip_feature.ip_total_late_times_1y.notnull().map(lambda x: 1 if x == True else 0)
ip_feature['ip_active_6m'] = ip_feature.ip_total_late_times_6m.notnull().map(lambda x: 1 if x == True else 0)
ACCOUNT_1Y = installments_payments.groupby('SK_ID_PREV')['DAYS_ENTRY_PAYMENT'].max().map(lambda x: 1 if x>=-365 else 0)
ACCOUNT_6M = installments_payments.groupby('SK_ID_PREV')['DAYS_ENTRY_PAYMENT'].max().map(lambda x: 1 if x>=-180 else 0)
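# ACCOUNT_1Y / ACCOUNT_6M flag a previous loan as active when its most recent
# payment entry falls within the last 365 / 180 days.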
ACTIVE_ACCOUNT = pd.DataFrame({'ACCOUNT_1Y':ACCOUNT_1Y, 'ACCOUNT_6M':ACCOUNT_6M}).reset_index()
pa_ip = previous_application[['SK_ID_CURR', 'SK_ID_PREV', 'NAME_CONTRACT_TYPE']].merge(ACTIVE_ACCOUNT, on='SK_ID_PREV', how='left')
COUNT_1Y = pd.get_dummies(pa_ip.loc[pa_ip.ACCOUNT_1Y==1, ['SK_ID_CURR', 'NAME_CONTRACT_TYPE']], prefix='ip_count_1y_').groupby('SK_ID_CURR').sum().reset_index()
COUNT_6M = pd.get_dummies(pa_ip.loc[pa_ip.ACCOUNT_6M==1, ['SK_ID_CURR', 'NAME_CONTRACT_TYPE']], prefix='ip_count_6m_').groupby('SK_ID_CURR').sum().reset_index()
ip_feature = ip_feature.merge(COUNT_1Y, on='SK_ID_CURR', how='left').merge(COUNT_6M, on='SK_ID_CURR', how='left')
for i in range(-6, 0):
    ip_feature.iloc[:, i] = ip_feature.iloc[:, i].fillna(0)
ip_feature.shape
ip_feature.to_csv('ip_feature.csv', index=False) | Home Credit Default Risk |
11,198,944 | print(classification_report(y_test, result))<predict_on_test> | pd.set_option('display.max_columns', None)
%matplotlib inline
warnings.filterwarnings("ignore")
application_train = pd.read_csv('../input/application_train.csv')
POS_CASH_balance = pd.read_csv('../input/POS_CASH_balance.csv')
POS_CASH_balance = POS_CASH_balance.sort_values(['SK_ID_CURR', 'SK_ID_PREV', 'MONTHS_BALANCE'])
pcb_prev_count = POS_CASH_balance.groupby('SK_ID_CURR')['SK_ID_PREV'].nunique()
pcb_avg_month = POS_CASH_balance.groupby('SK_ID_CURR')['MONTHS_BALANCE'].count() / pcb_prev_count
pcb_recent_active = POS_CASH_balance.groupby('SK_ID_CURR')['MONTHS_BALANCE'].max()
pcb_temp_inst_change_time = POS_CASH_balance[['SK_ID_PREV', 'CNT_INSTALMENT']].groupby('SK_ID_PREV')['CNT_INSTALMENT'].nunique().map(lambda x: x - 1).reset_index().rename(columns={'CNT_INSTALMENT':'pcb_prev_inst_change_time'})
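# pcb_prev_inst_change_time: how many times the installment count of a previous
# loan was renegotiated (distinct CNT_INSTALMENT values minus one).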
pcb_temp = POS_CASH_balance.groupby(['SK_ID_CURR', 'SK_ID_PREV'])['MONTHS_BALANCE'].count().reset_index()
pcb_temp = pcb_temp.merge(pcb_temp_inst_change_time, on='SK_ID_PREV')
pcb_inst_change_time = pcb_temp.groupby('SK_ID_CURR')['pcb_prev_inst_change_time'].sum()
pcb_temp = POS_CASH_balance.groupby(['SK_ID_CURR', 'SK_ID_PREV'])['CNT_INSTALMENT'].mean().reset_index()
pcb_avg_inst = pcb_temp.groupby(['SK_ID_CURR'])['CNT_INSTALMENT'].mean()
pcb_temp = POS_CASH_balance.groupby(['SK_ID_CURR', 'SK_ID_PREV'])['MONTHS_BALANCE'].max().reset_index()
pcb_temp = pcb_temp.merge(POS_CASH_balance[['SK_ID_CURR', 'SK_ID_PREV', 'MONTHS_BALANCE', 'NAME_CONTRACT_STATUS']], on=['SK_ID_CURR', 'SK_ID_PREV', 'MONTHS_BALANCE'])
pcb_temp['active_1'] = pcb_temp.MONTHS_BALANCE.map(lambda x: 1 if x>=-4 else 0)
pcb_temp['active_2'] = pcb_temp.NAME_CONTRACT_STATUS.map(lambda x: 1 if x=='Active' else 0)
pcb_temp['active'] = pcb_temp.active_1 * pcb_temp.active_2
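# 'active' flags a previous loan whose latest snapshot is recent (MONTHS_BALANCE
# >= -4) and whose final NAME_CONTRACT_STATUS is still 'Active'.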
pcb_active_inst = pcb_temp.groupby('SK_ID_CURR')['active'].sum()
pcb_temp = POS_CASH_balance[['SK_ID_CURR', 'SK_ID_PREV', 'SK_DPD', 'SK_DPD_DEF']].groupby(['SK_ID_CURR','SK_ID_PREV'])[['SK_DPD','SK_DPD_DEF']].max().reset_index()
pcb_max_dpd_days = pcb_temp.groupby('SK_ID_CURR')['SK_DPD'].max()
pcb_total_dpd_days = pcb_temp.groupby('SK_ID_CURR')['SK_DPD'].sum()
pcb_max_largedpd_days = pcb_temp.groupby('SK_ID_CURR')['SK_DPD_DEF'].max()
pcb_total_largedpd_days = pcb_temp.groupby('SK_ID_CURR')['SK_DPD_DEF'].sum()
pcb_temp = POS_CASH_balance.loc[POS_CASH_balance.MONTHS_BALANCE>=-12, ['SK_ID_CURR', 'SK_ID_PREV', 'SK_DPD', 'SK_DPD_DEF']].groupby(['SK_ID_CURR','SK_ID_PREV'])[['SK_DPD','SK_DPD_DEF']].max().reset_index()
pcb_max_dpd_days_1y = pcb_temp.groupby('SK_ID_CURR')['SK_DPD'].max()
pcb_total_dpd_days_1y = pcb_temp.groupby('SK_ID_CURR')['SK_DPD'].sum()
pcb_max_largedpd_days_1y = pcb_temp.groupby('SK_ID_CURR')['SK_DPD_DEF'].max()
pcb_total_largedpd_days_1y = pcb_temp.groupby('SK_ID_CURR')['SK_DPD_DEF'].sum()
pcb_temp = POS_CASH_balance.loc[POS_CASH_balance.MONTHS_BALANCE>=-24, ['SK_ID_CURR', 'SK_ID_PREV', 'SK_DPD', 'SK_DPD_DEF']].groupby(['SK_ID_CURR','SK_ID_PREV'])[['SK_DPD','SK_DPD_DEF']].max().reset_index()
pcb_max_dpd_days_2y = pcb_temp.groupby('SK_ID_CURR')['SK_DPD'].max()
pcb_total_dpd_days_2y = pcb_temp.groupby('SK_ID_CURR')['SK_DPD'].sum()
pcb_max_largedpd_days_2y = pcb_temp.groupby('SK_ID_CURR')['SK_DPD_DEF'].max()
pcb_total_largedpd_days_2y = pcb_temp.groupby('SK_ID_CURR')['SK_DPD_DEF'].sum()
pcb_num = pd.DataFrame({'pcb_prev_count':pcb_prev_count, 'pcb_avg_month':pcb_avg_month, 'pcb_recent_active':pcb_recent_active, 'pcb_inst_change_time':pcb_inst_change_time,
'pcb_avg_inst':pcb_avg_inst, 'pcb_active_inst':pcb_active_inst,
'pcb_max_dpd_days':pcb_max_dpd_days, 'pcb_total_dpd_days':pcb_total_dpd_days, 'pcb_max_largedpd_days':pcb_max_largedpd_days, 'pcb_total_largedpd_days':pcb_total_largedpd_days,
'pcb_max_dpd_days_1y':pcb_max_dpd_days_1y, 'pcb_total_dpd_days_1y':pcb_total_dpd_days_1y, 'pcb_max_largedpd_days_1y':pcb_max_largedpd_days_1y,
'pcb_total_largedpd_days_1y':pcb_total_largedpd_days_1y, 'pcb_max_dpd_days_2y':pcb_max_dpd_days_2y, 'pcb_total_dpd_days_2y':pcb_total_dpd_days_2y,
                        'pcb_max_largedpd_days_2y':pcb_max_largedpd_days_2y, 'pcb_total_largedpd_days_2y':pcb_total_largedpd_days_2y}).reset_index()
pcb_temp = POS_CASH_balance.groupby(['SK_ID_CURR', 'SK_ID_PREV'])['MONTHS_BALANCE'].max().reset_index()
pcb_temp = pcb_temp.merge(POS_CASH_balance[['SK_ID_CURR', 'SK_ID_PREV', 'MONTHS_BALANCE', 'NAME_CONTRACT_STATUS']], on=['SK_ID_CURR', 'SK_ID_PREV', 'MONTHS_BALANCE'])
pcb_temp_dummy = pd.get_dummies(pcb_temp, prefix='pcb_end_as')
pcb_end_as_dummy = pcb_temp_dummy.loc[pcb_temp_dummy.pcb_end_as_Active!=1].drop(['SK_ID_PREV','MONTHS_BALANCE', 'pcb_end_as_Active'], axis=1).groupby('SK_ID_CURR').sum().reset_index()
del pcb_temp_dummy
gc.collect()
pcb_feature = pcb_num.merge(pcb_end_as_dummy, on='SK_ID_CURR', how='left')
pcb_feature['pcb_no_dpd'] = pcb_feature.pcb_total_dpd_days.map(lambda x: 1 if x == 0 else 0)
pcb_feature['pcb_no_largedpd'] = pcb_feature.pcb_total_largedpd_days.map(lambda x: 1 if x == 0 else 0)
pcb_feature['pcb_no_dpd_1y'] = pcb_feature.pcb_total_dpd_days_1y.map(lambda x: 1 if x == 0 else 0)
pcb_feature['pcb_no_largedpd_1y'] = pcb_feature.pcb_total_largedpd_days_1y.map(lambda x: 1 if x == 0 else 0)
pcb_feature['pcb_no_dpd_2y'] = pcb_feature.pcb_total_dpd_days_2y.map(lambda x: 1 if x == 0 else 0)
pcb_feature['pcb_no_largedpd_2y'] = pcb_feature.pcb_total_largedpd_days_2y.map(lambda x: 1 if x == 0 else 0)
pcb_feature.to_csv('pcb_feature.csv', index=False) | Home Credit Default Risk |
11,198,944 | clf4 = SVC(random_state=42)
%time clf4 = clf4.fit(X_train, y_train)
result = clf4.predict(X_test)<compute_test_metric> | pd.set_option('display.max_columns', None)
%matplotlib inline
warnings.filterwarnings("ignore")
previous_application = pd.read_csv("../input/previous_application.csv")
previous_application = previous_application.sort_values(['SK_ID_CURR', 'DAYS_DECISION'])
cat_col = []
for i in range(len(previous_application.columns)):
    if previous_application.iloc[:, i].dtype == 'object':
        cat_col.append(i)
cat_pa = previous_application.iloc[:, cat_col]
cat_pa = pd.concat([previous_application[['HOUR_APPR_PROCESS_START', 'NFLAG_LAST_APPL_IN_DAY', 'NFLAG_INSURED_ON_APPROVAL']], cat_pa], axis=1).fillna('XNA')
cat_pa.HOUR_APPR_PROCESS_START = cat_pa.HOUR_APPR_PROCESS_START.astype('object')
cat_pa.NFLAG_LAST_APPL_IN_DAY = cat_pa.NFLAG_LAST_APPL_IN_DAY.astype('object')
cat_pa.NFLAG_INSURED_ON_APPROVAL = cat_pa.NFLAG_INSURED_ON_APPROVAL.astype('object')
cat_NAME_CONTRACT_TYPE = pd.get_dummies(cat_pa.NAME_CONTRACT_TYPE, prefix='pa_NAME_CONTRACT_TYPE').drop('pa_NAME_CONTRACT_TYPE_XNA', axis=1)
cat_NAME_CASH_LOAN_PURPOSE = pd.get_dummies(cat_pa.NAME_CASH_LOAN_PURPOSE, prefix='pa_NAME_CASH_LOAN_PURPOSE').drop('pa_NAME_CASH_LOAN_PURPOSE_XNA', axis=1)
cat_NAME_CONTRACT_STATUS = pd.get_dummies(cat_pa.NAME_CONTRACT_STATUS, prefix='pa_NAME_CONTRACT_STATUS')
cat_NAME_PAYMENT_TYPE = pd.get_dummies(cat_pa.NAME_PAYMENT_TYPE, prefix='pa_NAME_PAYMENT_TYPE').drop('pa_NAME_PAYMENT_TYPE_XNA', axis=1)
cat_CODE_REJECT_REASON = pd.get_dummies(cat_pa.CODE_REJECT_REASON, prefix='pa_CODE_REJECT_REASON').drop('pa_CODE_REJECT_REASON_XNA', axis=1)
cat_NAME_CLIENT_TYPE = pd.get_dummies(cat_pa.NAME_CLIENT_TYPE, prefix='pa_NAME_CLIENT_TYPE').drop('pa_NAME_CLIENT_TYPE_XNA', axis=1)
cat_NAME_PORTFOLIO = pd.get_dummies(cat_pa.NAME_PORTFOLIO, prefix='pa_NAME_PORTFOLIO').drop('pa_NAME_PORTFOLIO_XNA', axis=1)
cat_NAME_PRODUCT_TYPE = pd.get_dummies(cat_pa.NAME_PRODUCT_TYPE, prefix='pa_NAME_PRODUCT_TYPE').drop('pa_NAME_PRODUCT_TYPE_XNA', axis=1)
cat_NAME_YIELD_GROUP = pd.get_dummies(cat_pa.NAME_YIELD_GROUP, prefix='pa_NAME_YIELD_GROUP').drop('pa_NAME_YIELD_GROUP_XNA', axis=1)
cat_PRODUCT_COMBINATION = pd.get_dummies(cat_pa.PRODUCT_COMBINATION, prefix='pa_PRODUCT_COMBINATION').drop('pa_PRODUCT_COMBINATION_XNA', axis=1)
cat_NFLAG_INSURED_ON_APPROVAL = pd.get_dummies(cat_pa.NFLAG_INSURED_ON_APPROVAL, prefix='pa_NFLAG_INSURED_ON_APPROVAL').drop('pa_NFLAG_INSURED_ON_APPROVAL_XNA', axis=1)
cat_pa_dummy = pd.concat([previous_application[['SK_ID_PREV', 'SK_ID_CURR']], cat_NAME_CONTRACT_TYPE, cat_NAME_CASH_LOAN_PURPOSE, cat_NAME_CONTRACT_STATUS, cat_NAME_PAYMENT_TYPE, cat_CODE_REJECT_REASON,
cat_NAME_CLIENT_TYPE, cat_NAME_PORTFOLIO, cat_NAME_PRODUCT_TYPE, cat_NAME_YIELD_GROUP, cat_PRODUCT_COMBINATION, cat_NFLAG_INSURED_ON_APPROVAL], axis=1)
del (cat_NAME_CONTRACT_TYPE, cat_NAME_CASH_LOAN_PURPOSE, cat_NAME_CONTRACT_STATUS, cat_NAME_PAYMENT_TYPE, cat_CODE_REJECT_REASON, cat_NAME_CLIENT_TYPE,
     cat_NAME_PORTFOLIO, cat_NAME_PRODUCT_TYPE, cat_NAME_YIELD_GROUP, cat_PRODUCT_COMBINATION, cat_NFLAG_INSURED_ON_APPROVAL, cat_pa)
gc.collect()
pa_cat = cat_pa_dummy.drop('SK_ID_PREV', axis=1).groupby('SK_ID_CURR').mean().reset_index()
num_col = []
for i in range(len(previous_application.columns)):
    if (previous_application.iloc[:, i].dtype == 'int64') or (previous_application.iloc[:, i].dtype == 'float64'):
        num_col.append(i)
num_pa = previous_application.iloc[:, num_col].drop(['HOUR_APPR_PROCESS_START', 'NFLAG_LAST_APPL_IN_DAY', 'NFLAG_INSURED_ON_APPROVAL'], axis=1)
for i in range(-5, 0):
    num_pa.iloc[:, i] = num_pa.iloc[:, i].map(lambda x: np.nan if x == 365243.0 else x)
num_pa['app_vs_actual_less'] = (num_pa.AMT_APPLICATION < num_pa.AMT_CREDIT).map(lambda x: 1 if x == True else 0)
num_pa['app_vs_actual_more'] = (num_pa.AMT_APPLICATION > num_pa.AMT_CREDIT).map(lambda x: 1 if x == True else 0)
a = num_pa.DAYS_DECISION - num_pa.DAYS_DECISION.min() + 1
num_pa['ADJ_SCORE'] = (a - a.min()) / (a.max() - a.min()) + 0.5
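# ADJ_SCORE maps DAYS_DECISION linearly onto [0.5, 1.5], so the ADJ_* amounts
# below weight a customer's most recent applications up to three times more
# than the oldest ones.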
num_pa['ADJ_AMT_ANNUITY'] = num_pa['ADJ_SCORE']*num_pa['AMT_ANNUITY']
num_pa['ADJ_AMT_APPLICATION'] = num_pa['ADJ_SCORE']*num_pa['AMT_APPLICATION']
num_pa['ADJ_AMT_CREDIT'] = num_pa['ADJ_SCORE']*num_pa['AMT_CREDIT']
num_pa['FREQ'] =(num_pa['DAYS_LAST_DUE']-num_pa['DAYS_FIRST_DUE'])/num_pa['CNT_PAYMENT']
pa_prev_count = num_pa.groupby('SK_ID_CURR')['SK_ID_PREV'].count()
pa_avg_annuity = num_pa.groupby('SK_ID_CURR')['AMT_ANNUITY'].mean()
pa_avg_application = num_pa.groupby('SK_ID_CURR')['AMT_APPLICATION'].mean()
pa_avg_actual_credit = num_pa.groupby('SK_ID_CURR')['AMT_CREDIT'].mean()
pa_max_application = num_pa.groupby('SK_ID_CURR')['AMT_APPLICATION'].max()
pa_max_actual_credit = num_pa.groupby('SK_ID_CURR')['AMT_CREDIT'].max()
pa_total_annuity = num_pa.groupby('SK_ID_CURR')['AMT_ANNUITY'].sum()
pa_total_application = num_pa.groupby('SK_ID_CURR')['AMT_APPLICATION'].sum()
pa_total_actual_credit = num_pa.groupby('SK_ID_CURR')['AMT_CREDIT'].sum()
pa_not_full_credit_times = num_pa.groupby('SK_ID_CURR')['app_vs_actual_less'].sum()
pa_not_full_credit_rate = num_pa.groupby('SK_ID_CURR')['app_vs_actual_less'].mean()
pa_get_more_credit_times = num_pa.groupby('SK_ID_CURR')['app_vs_actual_more'].sum()
pa_get_more_credit_rate = num_pa.groupby('SK_ID_CURR')['app_vs_actual_more'].mean()
pa_total_annuity_adj = num_pa.groupby('SK_ID_CURR')['ADJ_AMT_ANNUITY'].sum()
pa_total_application_adj = num_pa.groupby('SK_ID_CURR')['ADJ_AMT_APPLICATION'].sum()
pa_total_actual_credit_adj = num_pa.groupby('SK_ID_CURR')['ADJ_AMT_CREDIT'].sum()
pa_avg_down_payment = num_pa.groupby('SK_ID_CURR')['AMT_DOWN_PAYMENT'].mean()
pa_max_down_payment = num_pa.groupby('SK_ID_CURR')['AMT_DOWN_PAYMENT'].max()
pa_total_down_payment = num_pa.groupby('SK_ID_CURR')['AMT_DOWN_PAYMENT'].sum()
pa_avg_goods_price = num_pa.groupby('SK_ID_CURR')['AMT_GOODS_PRICE'].mean()
pa_max_goods_price = num_pa.groupby('SK_ID_CURR')['AMT_GOODS_PRICE'].max()
pa_total_goods_price = num_pa.groupby('SK_ID_CURR')['AMT_GOODS_PRICE'].sum()
pa_avg_down_payment_rate = num_pa.groupby('SK_ID_CURR')['RATE_DOWN_PAYMENT'].mean()
pa_max_down_payment_rate = num_pa.groupby('SK_ID_CURR')['RATE_DOWN_PAYMENT'].max()
pa_total_down_payment_rate = num_pa.groupby('SK_ID_CURR')['RATE_DOWN_PAYMENT'].sum()
pa_avg_selling_area = num_pa.groupby('SK_ID_CURR')['SELLERPLACE_AREA'].mean()
pa_max_selling_area = num_pa.groupby('SK_ID_CURR')['SELLERPLACE_AREA'].max()
pa_total_selling_area = num_pa.groupby('SK_ID_CURR')['SELLERPLACE_AREA'].sum()
pa_avg_term = num_pa.groupby('SK_ID_CURR')['CNT_PAYMENT'].mean()
pa_max_term = num_pa.groupby('SK_ID_CURR')['CNT_PAYMENT'].max()
pa_total_term = num_pa.groupby('SK_ID_CURR')['CNT_PAYMENT'].sum()
pa_most_frequent_term = num_pa.groupby('SK_ID_CURR')['CNT_PAYMENT'].agg(pd.Series.mode).map(lambda x: np.mean(x))
pa_recent_decision_day = num_pa.groupby('SK_ID_CURR')['DAYS_DECISION'].max()
pa_earliest_decision_day = num_pa.groupby('SK_ID_CURR')['DAYS_DECISION'].min()
pa_usage_length = pa_recent_decision_day - pa_earliest_decision_day
num_pa['application_interval'] = num_pa.groupby('SK_ID_CURR')['DAYS_DECISION'].diff(-1)
missing_iter = iter(num_pa.groupby('SK_ID_CURR')['DAYS_DECISION'].max())
num_pa.application_interval = num_pa.application_interval.map(lambda x: -next(missing_iter) if np.isnan(x) else -x)
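# groupby(...).diff(-1) leaves the last application of each customer as NaN;
# since the frame is sorted by SK_ID_CURR, the iterator over per-customer max
# DAYS_DECISION lines up with those NaNs, filling each with the days elapsed
# since that customer's latest decision.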
pa_avg_intervel = num_pa.groupby('SK_ID_CURR')['application_interval'].mean()
pa_sd_intervel = num_pa.groupby('SK_ID_CURR')['application_interval'].agg('std').fillna(0)
pa_first_due_day = num_pa.groupby('SK_ID_CURR')['DAYS_FIRST_DUE'].min()
pa_last_due_day = num_pa.groupby('SK_ID_CURR')['DAYS_LAST_DUE'].max()
pa_last_termination_day = num_pa.groupby('SK_ID_CURR')['DAYS_TERMINATION'].max()
pa_lastdue_termination_range = pa_last_termination_day - pa_last_due_day
pa_avg_freq = num_pa.groupby('SK_ID_CURR')['FREQ'].mean()
pa_min_freq = num_pa.groupby('SK_ID_CURR')['FREQ'].min()
pa_num = pd.DataFrame({'pa_prev_count':pa_prev_count, 'pa_avg_annuity':pa_avg_annuity, 'pa_avg_application':pa_avg_application, 'pa_avg_actual_credit':pa_avg_actual_credit,
'pa_max_application':pa_max_application, 'pa_max_actual_credit':pa_max_actual_credit, 'pa_total_annuity':pa_total_annuity, 'pa_total_application':pa_total_application, 'pa_total_actual_credit':pa_total_actual_credit,
'pa_not_full_credit_times':pa_not_full_credit_times, 'pa_not_full_credit_rate':pa_not_full_credit_rate, 'pa_get_more_credit_times':pa_get_more_credit_times, 'pa_get_more_credit_rate':pa_get_more_credit_rate,
'pa_total_annuity_adj':pa_total_annuity_adj, 'pa_total_application_adj':pa_total_application_adj, 'pa_total_actual_credit_adj':pa_total_actual_credit_adj,
'pa_avg_down_payment':pa_avg_down_payment, 'pa_max_down_payment':pa_max_down_payment, 'pa_total_down_payment':pa_total_down_payment,
'pa_avg_goods_price':pa_avg_goods_price, 'pa_max_goods_price':pa_max_goods_price, 'pa_total_goods_price':pa_total_goods_price,
'pa_avg_down_payment_rate':pa_avg_down_payment_rate, 'pa_max_down_payment_rate':pa_max_down_payment_rate, 'pa_total_down_payment_rate':pa_total_down_payment_rate,
'pa_avg_selling_area':pa_avg_selling_area, 'pa_max_selling_area':pa_max_selling_area, 'pa_total_selling_area':pa_total_selling_area,
'pa_avg_term':pa_avg_term, 'pa_max_term':pa_max_term, 'pa_total_term':pa_total_term, 'pa_most_frequent_term':pa_most_frequent_term,
'pa_recent_decision_day':pa_recent_decision_day, 'pa_earliest_decision_day':pa_earliest_decision_day, 'pa_usage_length': pa_usage_length,
'pa_avg_intervel':pa_avg_intervel, 'pa_sd_intervel':pa_sd_intervel, 'pa_first_due_day':pa_first_due_day, 'pa_last_due_day':pa_last_due_day,
                       'pa_last_termination_day':pa_last_termination_day, 'pa_lastdue_termination_range':pa_lastdue_termination_range, 'pa_avg_freq':pa_avg_freq, 'pa_min_freq':pa_min_freq}).reset_index()
pa_feature = pa_num.merge(pa_cat, on='SK_ID_CURR')
del pa_num, pa_cat
gc.collect()
pa_feature.to_csv('pa_feature.csv', index=False) | Home Credit Default Risk |
11,198,944 | accuracy_score(y_test, result)<compute_test_metric> | bureau = pd.read_csv('../input/bureau.csv')
bureau_balance = pd.read_csv('../input/bureau_balance.csv')
med = bureau.AMT_CREDIT_SUM.median()
bureau.AMT_CREDIT_SUM = bureau.AMT_CREDIT_SUM.fillna(med)
med = bureau.AMT_CREDIT_SUM_DEBT.median()
bureau.AMT_CREDIT_SUM_DEBT = bureau.AMT_CREDIT_SUM_DEBT.fillna(med)
bureau['OVERDUE_DEBT_RATIO'] = bureau.AMT_CREDIT_SUM_OVERDUE/(bureau.AMT_CREDIT_SUM_DEBT+1)
bureau['DEBT_TOTAL_RATIO'] = bureau.AMT_CREDIT_SUM_DEBT/(bureau.AMT_CREDIT_SUM+1)
bureau_balance['INT_STATUS'] = bureau_balance.STATUS.replace('X', 0.1).replace('C', 0).astype('float64')
bur_max_bad_level = bureau_balance.groupby('SK_ID_BUREAU')['INT_STATUS'].max().reset_index()
cluster_bur = bureau[['SK_ID_BUREAU', 'CREDIT_DAY_OVERDUE', 'OVERDUE_DEBT_RATIO', 'DEBT_TOTAL_RATIO', 'CNT_CREDIT_PROLONG']].merge(bur_max_bad_level, on='SK_ID_BUREAU', how='left').fillna(0)
X = cluster_bur.drop(['SK_ID_BUREAU'], axis=1)
X = Normalizer().fit_transform(X)
gmm = GaussianMixture(n_components=2, verbose=5, max_iter=100, init_params='kmeans')
gmm.fit(X)
group_prob = gmm.predict_proba(X)
group_prob = np.round(group_prob, decimals=2)
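# The soft probability of belonging to the first GMM component serves as a
# per-bureau-record cluster score; it is averaged per customer below.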
bur_cluster = pd.concat([bureau[['SK_ID_CURR', 'SK_ID_BUREAU']], pd.DataFrame({'cluster':group_prob[:, 0]})], axis=1)
bur_cluster = bur_cluster.groupby('SK_ID_CURR')['cluster'].mean().reset_index()
bur_cluster.to_csv('bur_cluster.csv', index=False)
| Home Credit Default Risk |
11,198,944 | print(classification_report(y_test, result))<predict_on_test> | a = time.time()
row1=None
row2=None
row3=None
app_train = pd.read_csv('../input/application_train.csv', nrows=row1).sort_values('SK_ID_CURR')
app_test = pd.read_csv('../input/application_test.csv', nrows=row1).sort_values('SK_ID_CURR')
bureau = pd.read_csv('../input/bureau.csv', nrows=row2).sort_values(['SK_ID_CURR', 'SK_ID_BUREAU'])
bureau_balance = pd.read_csv('../input/bureau_balance.csv', nrows=row3).sort_values(['SK_ID_BUREAU', 'MONTHS_BALANCE'])
previous = pd.read_csv('../input/previous_application.csv', nrows=row3).sort_values(['SK_ID_CURR', 'SK_ID_PREV'])
cash = pd.read_csv('../input/POS_CASH_balance.csv', nrows=row3).sort_values(['SK_ID_CURR', 'SK_ID_PREV', 'MONTHS_BALANCE'])
credit = pd.read_csv('../input/credit_card_balance.csv', nrows=row3).sort_values(['SK_ID_CURR', 'SK_ID_PREV', 'MONTHS_BALANCE'])
installments = pd.read_csv('../input/installments_payments.csv', nrows=row3).sort_values(['SK_ID_CURR', 'SK_ID_PREV'])
app_train = app_train[['SK_ID_CURR']]
app_test = app_test[['SK_ID_CURR']]
bureau = bureau[['SK_ID_CURR', 'SK_ID_BUREAU', 'DAYS_CREDIT', 'AMT_CREDIT_MAX_OVERDUE', 'CNT_CREDIT_PROLONG',
'AMT_CREDIT_SUM', 'AMT_CREDIT_SUM_DEBT', 'AMT_CREDIT_SUM_LIMIT', 'AMT_CREDIT_SUM_OVERDUE']]
bureau_balance.STATUS = bureau_balance.STATUS.map({'C':0, 'X':0.1, '1':1, '2':2, '3':3, '4':4, '5':5})
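# STATUS becomes an ordinal delinquency scale: C (closed) -> 0, X (unknown) -> 0.1,
# and the DPD buckets 1-5 keep their severity rank.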
previous = previous[['SK_ID_CURR', 'SK_ID_PREV', 'AMT_ANNUITY', 'AMT_APPLICATION', 'AMT_CREDIT', 'AMT_GOODS_PRICE', 'NAME_CONTRACT_STATUS', 'DAYS_DECISION',
'CNT_PAYMENT', 'NAME_YIELD_GROUP', 'NFLAG_INSURED_ON_APPROVAL','SELLERPLACE_AREA']]
previous.NAME_CONTRACT_STATUS = previous.NAME_CONTRACT_STATUS.map(lambda x: 1 if x=='Refused' else 0)
previous.NAME_YIELD_GROUP = previous.NAME_YIELD_GROUP.map({'XNA':0, 'low_normal':1, 'low_action':1, 'middle':2, 'high':3})
cash = cash[['SK_ID_PREV', 'MONTHS_BALANCE', 'SK_DPD', 'SK_DPD_DEF']]
credit = credit[['SK_ID_PREV', 'MONTHS_BALANCE', 'SK_DPD', 'SK_DPD_DEF', 'AMT_BALANCE', 'AMT_CREDIT_LIMIT_ACTUAL', 'AMT_INST_MIN_REGULARITY', 'AMT_TOTAL_RECEIVABLE']]
installments['date_diff'] = installments.DAYS_INSTALMENT - installments.DAYS_ENTRY_PAYMENT
installments['amount_diff'] = installments.AMT_INSTALMENT - installments.AMT_PAYMENT
installments = installments[['SK_ID_PREV', 'DAYS_INSTALMENT', 'amount_diff', 'date_diff']]
def replace_day_outliers(df):
    for col in df.columns:
        if "DAYS" in col:
            df[col] = df[col].replace({365243: np.nan})
    return df
app_train = replace_day_outliers(app_train)
app_test = replace_day_outliers(app_test)
bureau = replace_day_outliers(bureau)
bureau_balance = replace_day_outliers(bureau_balance)
previous = replace_day_outliers(previous)
cash = replace_day_outliers(cash)
credit = replace_day_outliers(credit)
installments = replace_day_outliers(installments)
start_date = pd.Timestamp("2018-01-01")
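# featuretools time indices need absolute timestamps, so the relative day/month
# offsets below are anchored to an arbitrary reference date.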
for col in ['DAYS_CREDIT']:
    bureau[col] = pd.to_timedelta(bureau[col], 'D')
bureau['bureau_credit_application_date'] = start_date + bureau['DAYS_CREDIT']
bureau_balance['MONTHS_BALANCE'] = pd.to_timedelta(bureau_balance['MONTHS_BALANCE'], 'M')
bureau_balance['bureau_balance_date'] = start_date + bureau_balance['MONTHS_BALANCE']
bureau = bureau.drop(columns = ['DAYS_CREDIT'])
bureau_balance = bureau_balance.drop(columns = ['MONTHS_BALANCE'])
for col in ['DAYS_DECISION']:
    previous[col] = pd.to_timedelta(previous[col], 'D')
previous['previous_decision_date'] = start_date + previous['DAYS_DECISION']
previous = previous.drop(columns = ['DAYS_DECISION'])
cash['MONTHS_BALANCE'] = pd.to_timedelta(cash['MONTHS_BALANCE'], 'M')
cash['cash_balance_date'] = start_date + cash['MONTHS_BALANCE']
cash = cash.drop(columns = ['MONTHS_BALANCE'])
credit['MONTHS_BALANCE'] = pd.to_timedelta(credit['MONTHS_BALANCE'], 'M')
credit['credit_balance_date'] = start_date + credit['MONTHS_BALANCE']
credit = credit.drop(columns = ['MONTHS_BALANCE'])
installments['DAYS_INSTALMENT'] = pd.to_timedelta(installments['DAYS_INSTALMENT'], 'D')
installments['installments_due_date'] = start_date + installments['DAYS_INSTALMENT']
installments = installments.drop(columns = ['DAYS_INSTALMENT'])
es = ft.EntitySet(id = 'clients')
es = es.entity_from_dataframe(entity_id = 'app_train', dataframe = app_train,
index = 'SK_ID_CURR')
es = es.entity_from_dataframe(entity_id = 'app_test', dataframe = app_test,
index = 'SK_ID_CURR')
es = es.entity_from_dataframe(entity_id = 'bureau', dataframe = bureau,
index = 'SK_ID_BUREAU', time_index='bureau_credit_application_date')
es = es.entity_from_dataframe(entity_id = 'bureau_balance', dataframe = bureau_balance,
make_index = True, index = 'bb_index',
time_index = 'bureau_balance_date')
es = es.entity_from_dataframe(entity_id = 'previous', dataframe = previous,
index = 'SK_ID_PREV', time_index = 'previous_decision_date')
es = es.entity_from_dataframe(entity_id = 'cash', dataframe = cash,
make_index = True, index = 'cash_index',
time_index = 'cash_balance_date')
es = es.entity_from_dataframe(entity_id = 'installments', dataframe = installments,
make_index = True, index = 'installments_index',
time_index = 'installments_due_date')
es = es.entity_from_dataframe(entity_id = 'credit', dataframe = credit,
make_index = True, index = 'credit_index',
time_index = 'credit_balance_date')
r_app_bureau = ft.Relationship(es['app_train']['SK_ID_CURR'], es['bureau']['SK_ID_CURR'])
r_test_app_bureau = ft.Relationship(es['app_test']['SK_ID_CURR'], es['bureau']['SK_ID_CURR'])
r_bureau_balance = ft.Relationship(es['bureau']['SK_ID_BUREAU'], es['bureau_balance']['SK_ID_BUREAU'])
r_app_previous = ft.Relationship(es['app_train']['SK_ID_CURR'], es['previous']['SK_ID_CURR'])
r_test_app_previous = ft.Relationship(es['app_test']['SK_ID_CURR'], es['previous']['SK_ID_CURR'])
r_previous_cash = ft.Relationship(es['previous']['SK_ID_PREV'], es['cash']['SK_ID_PREV'])
r_previous_installments = ft.Relationship(es['previous']['SK_ID_PREV'], es['installments']['SK_ID_PREV'])
r_previous_credit = ft.Relationship(es['previous']['SK_ID_PREV'], es['credit']['SK_ID_PREV'])
es = es.add_relationships([r_app_bureau, r_test_app_bureau, r_bureau_balance,
r_app_previous, r_test_app_previous, r_previous_cash,
r_previous_installments, r_previous_credit])
del app_train, app_test, bureau, bureau_balance, cash, credit, previous, installments
gc.collect()
print('prepare time:', str(time.time() - a))
a = time.time()
time_features, time_feature_names = ft.dfs(entityset = es, target_entity = 'app_train',
max_depth = 2,
agg_primitives = ['trend'],
features_only = False, verbose = True,
chunk_size = 30000,
ignore_entities = ['app_test'])
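# The 'trend' aggregation primitive fits a linear slope of each numeric
# variable against its entity's time index, up to depth 2 (e.g. the trend of a
# customer's credit card balance over credit_balance_date).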
print('feature time:', str(time.time() - a))
time_features.reset_index().to_csv('trend3_train.csv', index=False)
time_features_test, time_feature_names = ft.dfs(entityset = es, target_entity = 'app_test',
max_depth = 2,
agg_primitives = ['trend'],
features_only = False, verbose = True,
chunk_size = 25000,
ignore_entities = ['app_train'])
time_features_test.reset_index().to_csv('trend4_test.csv', index=False)
| Home Credit Default Risk |
11,198,944 | def prevendo_noticias(string, model):
    to_array = []
    to_array.append(review_to_words(string))
    sample_final = vectorizer.transform(to_array)
    sample_final = sample_final.toarray()
    result = model.predict(sample_final)
    if result[0] == 1:
        label = 'Fake News'
    else:
        label = 'Verdadeira'
    return label, string<compute_test_metric> | full_df = pd.read_csv("../input/home-credit-default-risk/application_train.csv")
test_df = pd.read_csv("../input/home-credit-default-risk/application_test.csv")
test_df
| Home Credit Default Risk |
11,198,944 | prevendo_noticias('Bolsonaro pessoalmente incendêia a amazonia e mata as girafas', forest)<drop_column> | full_df = pd.get_dummies(full_df)
test_df = pd.get_dummies(test_df)
full_df, test_df = full_df.align(test_df, join = 'inner', axis = 1)
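# Note: align(join='inner') keeps only the columns shared by both frames, so it
# also drops TARGET from full_df; the label must be kept separately before this
# step if it is still needed for training.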
test_df.head(10)
test_df.count() | Home Credit Default Risk |
11,198,944 | prevendo_noticias('Jornalista joga água benta em Temer e ele admite que impeachment foi golpe', forest)<train_model> | Home Credit Default Risk |
11,198,944 | model_final = RandomForestClassifier(random_state=42)
<define_search_space> | def model(features, test_features, encoding = 'ohe', n_folds = 5):
    train_ids = features['SK_ID_CURR']
    test_ids = test_features['SK_ID_CURR']
    labels = features['TARGET']
    features = features.drop(columns = ['SK_ID_CURR', 'TARGET'])
    test_features = test_features.drop(columns = ['SK_ID_CURR'])
    if encoding == 'ohe':
        features = pd.get_dummies(features)
        test_features = pd.get_dummies(test_features)
        features, test_features = features.align(test_features, join = 'inner', axis = 1)
        cat_indices = 'auto'
    elif encoding == 'le':
        label_encoder = LabelEncoder()
        cat_indices = []
        for i, col in enumerate(features):
            if features[col].dtype == 'object':
                features[col] = label_encoder.fit_transform(np.array(features[col].astype(str)).reshape((-1,)))
                test_features[col] = label_encoder.transform(np.array(test_features[col].astype(str)).reshape((-1,)))
                cat_indices.append(i)
    else:
        raise ValueError("Encoding must be either 'ohe' or 'le'")
    print('Training Data Shape: ', features.shape)
    print('Testing Data Shape: ', test_features.shape)
    feature_names = list(features.columns)
    features = np.array(features)
    test_features = np.array(test_features)
    k_fold = KFold(n_splits = n_folds, shuffle = True, random_state = 50)
    feature_importance_values = np.zeros(len(feature_names))
    test_predictions = np.zeros(test_features.shape[0])
    out_of_fold = np.zeros(features.shape[0])
    valid_scores = []
    train_scores = []
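    # test_predictions accumulates fold-averaged test probabilities, while
    # out_of_fold keeps each training row's prediction from the fold where it
    # was held out; the overall validation AUC below is computed on these.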
    for train_indices, valid_indices in k_fold.split(features):
        train_features, train_labels = features[train_indices], labels[train_indices]
        valid_features, valid_labels = features[valid_indices], labels[valid_indices]
        model = lgb.LGBMClassifier(n_estimators=500, objective = 'binary',
                                   class_weight = 'balanced', learning_rate = 0.5,
                                   reg_alpha = 0.7, reg_lambda = 0.5,
                                   subsample = 0.5, n_jobs = -1, random_state = 30)
        model.fit(train_features, train_labels, eval_metric = 'auc',
                  eval_set = [(valid_features, valid_labels), (train_features, train_labels)],
                  eval_names = ['valid', 'train'], categorical_feature = cat_indices,
                  early_stopping_rounds = 25, verbose = 200)
        best_iteration = model.best_iteration_
        feature_importance_values += model.feature_importances_ / k_fold.n_splits
        test_predictions += model.predict_proba(test_features, num_iteration = best_iteration)[:, 1] / k_fold.n_splits
        out_of_fold[valid_indices] = model.predict_proba(valid_features, num_iteration = best_iteration)[:, 1]
        valid_score = model.best_score_['valid']['auc']
        train_score = model.best_score_['train']['auc']
        valid_scores.append(valid_score)
        train_scores.append(train_score)
        gc.enable()
        del model, train_features, valid_features
        gc.collect()
    submission = pd.DataFrame({'SK_ID_CURR': test_ids, 'TARGET': test_predictions})
    feature_importances = pd.DataFrame({'feature': feature_names, 'importance': feature_importance_values})
    valid_auc = roc_auc_score(labels, out_of_fold)
    valid_scores.append(valid_auc)
    train_scores.append(np.mean(train_scores))
    fold_names = list(range(n_folds))
    fold_names.append('overall')
    metrics = pd.DataFrame({'fold': fold_names,
                            'train': train_scores,
                            'valid': valid_scores})
    return submission, feature_importances, metrics | Home Credit Default Risk |
11,198,944 | <train_on_grid><EOS> | submission, fi, metrics = model(full_df, test_df)
print('Baseline metrics')
print(metrics)
submission.to_csv('baseline_lgb.csv', index = False)
| Home Credit Default Risk |
9,356,808 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<find_best_params> | import numpy as np
import pandas as pd | Home Credit Default Risk |
9,356,808 | CV_rf.best_params_<train_model> | from tqdm.notebook import tqdm
import random
import gc
import time | Home Credit Default Risk |
9,356,808 | model_fit = RandomForestClassifier(random_state=42, bootstrap=True, criterion='entropy', n_estimators=800)<train_model> | from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import ExtraTreesClassifier | Home Credit Default Risk |
9,356,808 | %time model_fit = model_fit.fit(X_train, y_train)
result = model_fit.predict(X_test)<compute_test_metric> | import lightgbm as lgb | Home Credit Default Risk |
9,356,808 | accuracy_score(y_test, result)<train_model> | gc.enable() | Home Credit Default Risk |
9,356,808 | %time model_final = model_fit.fit(train_data_features, train_y)<load_from_csv> | train_data = pd.read_csv('/kaggle/input/home-credit-default-risk/application_train.csv',
                         na_values=['XNA', 'XAP'], na_filter=True)
test_data = pd.read_csv('/kaggle/input/home-credit-default-risk/application_test.csv',
                        na_values=['XNA', 'XAP'], na_filter=True) | Home Credit Default Risk |
9,356,808 | test = pd.read_csv('../input/fakenewsvortexbsb/sample_submission.csv', sep=';', error_bad_lines=False, quoting=3)<categorify> | train_counts = train_data.count().sort_values() / len(train_data)
test_counts = test_data.count().sort_values() / len(test_data) | Home Credit Default Risk |
9,356,808 | clean_test_review = []
for i in range(0, num_reviews):
    clean_test_review.append(review_to_words(test['Manchete'][i]))<categorify> | cols = set(train_counts[(train_counts < 1) & (train_counts > 0.99)].index) - set(test_counts[(test_counts < 1) & (test_counts > 0.9)].index) | Home Credit Default Risk |
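# cols: columns that are almost (but not fully) complete in train while not
# similarly near-complete in test; rows missing these few values are dropped next.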
9,356,808 | test_data_features = vectorizer.transform(clean_test_review)
test_data_features = test_data_features.toarray()<predict_on_test> | train_data.dropna(subset=cols, inplace=True) | Home Credit Default Risk |
9,356,808 | result_test = model_final.predict(test_data_features)<save_to_csv> | train_target = train_data[['SK_ID_CURR', 'TARGET']] | Home Credit Default Risk |
9,356,808 | minha_sub = pd.DataFrame({'index': test.index, 'Category': result_test})
minha_sub.to_csv('submission.csv', index=False)<import_modules> | submit = test_data[['SK_ID_CURR']] | Home Credit Default Risk |
9,356,808 | import pandas as pd
import numpy as np
import sklearn
import matplotlib.pyplot as plt
from sklearn import ensemble, neighbors
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_log_error, make_scorer
from sklearn.feature_selection import f_regression
import os<load_from_csv> | train_data.drop(columns=['TARGET'], inplace=True) | Home Credit Default Risk |
9,356,808 | test_d = pd.read_csv('../input/test.csv', index_col='Id')
train_d = pd.read_csv('../input/train.csv', index_col='Id')<concatenate> | test_data['IS_TRAIN'] = 0
train_data['IS_TRAIN'] = 1 | Home Credit Default Risk |
9,356,808 | test_d['median_house_value'] = np.nan
data = train_d.append(test_d, sort=False)<feature_engineering> | application_data = train_data.append(test_data) | Home Credit Default Risk |
9,356,808 | train_d['median_age_sum'] = train_d.groupby('median_age')['median_house_value'].transform('sum')
ages = train_d['median_age'].value_counts()
for i, row in train_d.iterrows():
    train_d.loc[i, 'median_age_freq'] = ages[train_d.loc[i, 'median_age']]<feature_engineering> | del(train_data)
del(test_data) | Home Credit Default Risk |
9,356,808 | data['rooms_not_bedrooms'] = np.subtract(data['total_rooms'],data['total_bedrooms'])
data['bedrooms_per_house'] = np.divide(data['total_bedrooms'],data['households'])
data['rooms_not_bedrooms_per_house'] = np.divide(data['rooms_not_bedrooms'],data['households'])
data['pop_per_house'] = np.divide(data['population'],data['households'])
data['pop_per_bedroom'] = np.divide(data['population'],data['total_bedrooms'])
data['pop_per_room'] = np.divide(data['population'],data['total_rooms'])
data['income_per_pop'] = np.divide(data['median_income'],data['pop_per_house'])<feature_engineering> | appl_counts = application_data.count().sort_values() / len(application_data) | Home Credit Default Risk |
9,356,808 | ages_disc=[]
for i, row in data.iterrows():
    if row['median_age'] < 16:
        ages_disc.append(0)
    elif row['median_age'] < 36:
        ages_disc.append(1)
    elif row['median_age'] < 52:
        ages_disc.append(2)
    else:
        ages_disc.append(3)
data['age_class'] = ages_disc<data_type_conversions> | appl_counts[(appl_counts < 0.6)] | Home Credit Default Risk |