kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
1,533,277
df['category_id'] = df['Category'].factorize() [0] df['category_id'][0:10]<remove_duplicates>
tmp = previous[previous['NAME_CONTRACT_STATUS'] != 'Approved'].groupby(['SK_ID_CURR'])['DAYS_DECISION']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_day_decision_fail'] = tmp_merge['des1'] df['max_day_decision_fail'] = tmp_merge['des2'] df['mean_day_decision_fail'] = tmp_merge['des3']
Home Credit Default Risk
1,533,277
category_id_df = df[['Category', 'category_id']].drop_duplicates().sort_values('category_id' )<define_variables>
tmp = previous[(~previous['NAME_CASH_LOAN_PURPOSE'].isin(['XAP','XNA'])) ] for df in [train,test]: tmp1 = tmp.groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index() tmp1.columns = ['SK_ID_CURR','des'] tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp1, on=['SK_ID_CURR'], how='left') df['count_clear_reason'] = tmp_merge['des']
Home Credit Default Risk
1,533,277
category_to_id = dict(category_id_df.values) id_to_category = dict(category_id_df[['category_id', 'Category']].values )<count_values>
tmp = previous.groupby(['SK_ID_CURR'])['DAYS_TERMINATION']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_day_termination'] = tmp_merge['des1'] df['max_day_termination'] = tmp_merge['des2'] df['mean_day_termination'] = tmp_merge['des3']
Home Credit Default Risk
1,533,277
df.groupby('Category' ).category_id.count() <categorify>
tmp = previous.groupby(['SK_ID_CURR'])['DAYS_LAST_DUE_1ST_VERSION']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_day_lastdue'] = tmp_merge['des1'] df['max_day_lastdue'] = tmp_merge['des2'] df['mean_day_lastdue'] = tmp_merge['des3']
Home Credit Default Risk
1,533,277
tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english') features = tfidf.fit_transform(df.Text ).toarray() labels = df.category_id <sort_values>
tmp = previous[~previous['DAYS_LAST_DUE_1ST_VERSION'].isnull() ].sort_values(by=['SK_ID_CURR','DAYS_LAST_DUE_1ST_VERSION']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-2 ).reset_index() tmp = tmp[['SK_ID_CURR','DAYS_LAST_DUE_1ST_VERSION']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['2nd_day_lastdue'] = tmp_merge['des']
Home Credit Default Risk
1,533,277
sorted(category_to_id.items() )<statistical_test>
tmp = previous.groupby(['SK_ID_CURR'])['sooner']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_sooner'] = tmp_merge['des1'] df['max_sooner'] = tmp_merge['des2'] df['mean_sooner'] = tmp_merge['des3']
Home Credit Default Risk
1,533,277
N = 3 for Category, category_id in sorted(category_to_id.items()): features_chi2 = chi2(features, labels == category_id) indices = np.argsort(features_chi2[0]) feature_names = np.array(tfidf.get_feature_names())[indices] unigrams = [v for v in feature_names if len(v.split(' ')) == 1] bigrams = [v for v in feature_names if len(v.split(' ')) == 2] print(" print(".Most correlated unigrams: .{}".format(' .'.join(unigrams[-N:]))) print(".Most correlated bigrams: .{}".format(' .'.join(bigrams[-N:])) )<define_variables>
tmp = previous.groupby(['SK_ID_CURR'])['SELLERPLACE_AREA']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_seller'] = tmp_merge['des1'] df['max_seller'] = tmp_merge['des2'] df['mean_seller'] = tmp_merge['des3']
Home Credit Default Risk
1,533,277
SAMPLE_SIZE = int(len(features)* 0.3) np.random.seed(0) indices = np.random.choice(range(len(features)) , size=SAMPLE_SIZE, replace=False) projected_features = TSNE(n_components=2, random_state=0 ).fit_transform(features[indices]) <filter>
for i in ['middle','low_normal','high','low_action']: tmp = previous[(previous['NAME_CONTRACT_TYPE'] == 'Cash loans')&(previous['NAME_YIELD_GROUP'] == i)] for df in [train,test]: tmp1 = tmp.groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index() tmp1.columns = ['SK_ID_CURR','des'] tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp1, on=['SK_ID_CURR'], how='left') df['count_' + str(i)] = tmp_merge['des'].fillna(0 )
Home Credit Default Risk
1,533,277
my_id = 0 projected_features[(labels[indices] == my_id ).values]<choose_model_class>
for df in [train,test]: df['tmp'] = df[['count_middle','count_low_normal','count_high','count_low_action']].sum(axis=1) for i in ['middle','low_normal','high','low_action']: df['ratio_' + i] = df['count_' + i]/df['tmp']
Home Credit Default Risk
1,533,277
models = [ RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0), MultinomialNB() , LogisticRegression(random_state=0), ] <create_dataframe>
for i in ['middle','low_normal','high','low_action']: tmp = previous[(previous['NAME_CONTRACT_TYPE'] == 'Consumer loans')&(previous['NAME_YIELD_GROUP'] == i)] for df in [train,test]: tmp1 = tmp.groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index() tmp1.columns = ['SK_ID_CURR','des'] tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp1, on=['SK_ID_CURR'], how='left') df['count_' + str(i)+ '_v1'] = tmp_merge['des'].fillna(0 )
Home Credit Default Risk
1,533,277
CV = 5 cv_df = pd.DataFrame(index=range(CV * len(models))) entries = []<find_best_model_class>
for df in [train,test]: df['tmp'] = df[['count_middle_v1','count_low_normal_v1','count_high_v1','count_low_action_v1']].sum(axis=1) for i in ['middle','low_normal','high','low_action']: df['ratio_' + i +"_v1"] = df['count_' + i + "_v1"]/df['tmp']
Home Credit Default Risk
1,533,277
for model in models: model_name = model.__class__.__name__ accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV) for fold_idx, accuracy in enumerate(accuracies): entries.append(( model_name, fold_idx, accuracy))<create_dataframe>
previous['tmp'] =(previous['AMT_ANNUITY'] * previous['CNT_PAYMENT'])/previous['AMT_CREDIT'] tmp = previous[(previous['NAME_CONTRACT_TYPE'] == 'Cash loans')&(previous['NAME_CONTRACT_STATUS'] != 'Approved')].groupby(['SK_ID_CURR'])['tmp']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_interest'] = tmp_merge['des1'] df['max_interest'] = tmp_merge['des2'] df['mean_interest'] = tmp_merge['des3'] tmp = previous[(previous['NAME_CONTRACT_TYPE'] == 'Consumer loans')&(previous['NAME_CONTRACT_STATUS'] != 'Approved')].groupby(['SK_ID_CURR'])['tmp']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_interest_v1'] = tmp_merge['des1'] df['max_interest_v1'] = tmp_merge['des2'] df['mean_interest_v1'] = tmp_merge['des3']
Home Credit Default Risk
1,533,277
cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'] )<groupby>
tmp = previous.groupby(['SK_ID_CURR'])['DAYS_FIRST_DRAWING']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_firstdraw'] = tmp_merge['des1'] df['max_firstdraw'] = tmp_merge['des2'] df['mean_firstdraw'] = tmp_merge['des3']
Home Credit Default Risk
1,533,277
cv_df.groupby('model_name' ).accuracy.mean()<train_model>
previous['tmp'] = previous['DAYS_FIRST_DRAWING'] - previous['DAYS_DECISION'] tmp = previous.groupby(['SK_ID_CURR'])['tmp']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_firstdraw_decision'] = tmp_merge['des1'] df['max_firstdraw_decision'] = tmp_merge['des2'] df['mean_firstdraw_decision'] = tmp_merge['des3']
Home Credit Default Risk
1,533,277
model = LogisticRegression(random_state=0) X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, df.index, test_size=0.33, random_state=0) model.fit(X_train, y_train) y_pred_proba = model.predict_proba(X_test) y_pred = model.predict(X_test )<compute_test_metric>
previous['tmp'] = previous['DAYS_FIRST_DUE'] - previous['DAYS_FIRST_DRAWING'] tmp = previous.groupby(['SK_ID_CURR'])['tmp']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_firstdraw_firstdue'] = tmp_merge['des1'] df['max_firstdraw_firstdue'] = tmp_merge['des2'] df['mean_firstdraw_firstdue'] = tmp_merge['des3']
Home Credit Default Risk
1,533,277
for predicted in category_id_df.category_id: for actual in category_id_df.category_id: if predicted != actual and conf_mat[actual, predicted] >= 2: print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted])) display(df.loc[indices_test[(y_test == actual)&(y_pred == predicted)]]['Text']) print('' )<train_model>
previous['tmp'] = previous['DAYS_LAST_DUE'] - previous['DAYS_FIRST_DRAWING'] tmp = previous.groupby(['SK_ID_CURR'])['tmp']\ .agg({"returns": [np.min, np.max,np.mean]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_firstdraw_lastdue'] = tmp_merge['des1'] df['max_firstdraw_lastdue'] = tmp_merge['des2'] df['mean_firstdraw_lastdue'] = tmp_merge['des3']
Home Credit Default Risk
1,533,277
model.fit(features, labels )<features_selection>
tmp = bureau[(bureau['CREDIT_ACTIVE'] == "Active")].groupby(['SK_ID_CURR'])['SK_ID_BUREAU'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_active_bureau'] = tmp_merge['des'].fillna(0) tmp = bureau[bureau['CREDIT_ACTIVE'] != "Active"].groupby(['SK_ID_CURR'])['SK_ID_BUREAU'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_closed_bureau'] = tmp_merge['des'].fillna(0 )
Home Credit Default Risk
1,533,277
N = 5 for Category, category_id in sorted(category_to_id.items()): indices = np.argsort(model.coef_[category_id]) feature_names = np.array(tfidf.get_feature_names())[indices] unigrams = [v for v in reversed(feature_names)if len(v.split(' ')) == 1][:N] bigrams = [v for v in reversed(feature_names)if len(v.split(' ')) == 2][:N] print(" print(".Top unigrams: .{}".format(' .'.join(unigrams))) print(".Top bigrams: .{}".format(' .'.join(bigrams)) )<predict_on_test>
tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['SK_ID_BUREAU'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_active_bureau_v2'] = tmp_merge['des'].fillna(0) tmp = bureau[(bureau['CREDIT_ACTIVE'] != "Active")&(bureau['CREDIT_TYPE'] == "Credit card")].groupby(['SK_ID_CURR'])['SK_ID_BUREAU'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_closed_bureau_v2'] = tmp_merge['des'].fillna(0 )
Home Credit Default Risk
1,533,277
texts = ["Hooli stock price soared after a dip in PiedPiper revenue growth.", "Captain Tsubasa scores a magnificent goal for the Japanese team.", "Merryweather mercenaries are sent on another mission, as government oversight groups call for new sanctions.", "Beyoncé releases a new album, tops the charts in all of south-east Asia!", "You won't guess what the latest trend in data analysis is!"] text_features = tfidf.transform(texts) predictions = model.predict(text_features) for text, predicted in zip(texts, predictions): print('"{}"'.format(text)) print(" - Predicted as: '{}'".format(id_to_category[predicted])) print("" )<load_from_csv>
tmp = bureau[(~bureau['CREDIT_TYPE'].isin(["Credit card","Consumer credit"])) ].groupby(['SK_ID_CURR'])['SK_ID_BUREAU'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_active_bureau_v3'] = tmp_merge['des'].fillna(0) tmp = bureau[(bureau['CREDIT_ACTIVE'] != "Active")&(~bureau['CREDIT_TYPE'].isin(["Credit card","Consumer credit"])) ].groupby(['SK_ID_CURR'])['SK_ID_BUREAU'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_closed_bureau_v3'] = tmp_merge['des'].fillna(0 )
Home Credit Default Risk
1,533,277
TEST_PATH = os.path.join(".. /input/bbc-test", "BBC News Test.csv") test_df = pd.read_csv(TEST_PATH) <data_type_conversions>
bureau['AMT_CREDIT_SUM'].replace(0, np.nan, inplace = True) tmp = bureau[(bureau['CREDIT_ACTIVE'] == "Active")&(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_active_credit_bureau'] = tmp_merge['des1'] df['max_active_credit_bureau'] = tmp_merge['des2'] df['mean_active_credit_bureau'] = tmp_merge['des3'] df['sum_active_credit_bureau'] = tmp_merge['des4'] tmp = bureau[(bureau['CREDIT_ACTIVE'] != "Active")&(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_closed_credit_bureau'] = tmp_merge['des1'] df['max_closed_credit_bureau'] = tmp_merge['des2'] df['mean_closed_credit_bureau'] = tmp_merge['des3'] df['sum_closed_credit_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
test_df.Text.tolist()<predict_on_test>
bureau['AMT_CREDIT_SUM'].replace(0, np.nan, inplace = True) tmp = bureau[(bureau['CREDIT_TYPE'] == "Credit card")].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_active_credit_bureau_v1'] = tmp_merge['des1'] df['max_active_credit_bureau_v1'] = tmp_merge['des2'] df['mean_active_credit_bureau_v1'] = tmp_merge['des3'] df['sum_active_credit_bureau_v1'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
test_features = tfidf.transform(test_df.Text.tolist()) Y_pred = model.predict(test_features )<define_variables>
bureau['AMT_CREDIT_SUM'].replace(0, np.nan, inplace = True) tmp = bureau[(bureau['CREDIT_TYPE'] == "Car loan")].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_active_credit_bureau_v2'] = tmp_merge['des1'] df['max_active_credit_bureau_v2'] = tmp_merge['des2'] df['mean_active_credit_bureau_v2'] = tmp_merge['des3'] df['sum_active_credit_bureau_v2'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
Y_pred_name =[] for cat_id in Y_pred : Y_pred_name.append(id_to_category[cat_id] )<create_dataframe>
bureau['AMT_CREDIT_SUM'].replace(0, np.nan, inplace = True) tmp = bureau[(~bureau['CREDIT_TYPE'].isin(["Credit card","Consumer credit","Car loan"])) ].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_active_credit_bureau_v3'] = tmp_merge['des1'] df['max_active_credit_bureau_v3'] = tmp_merge['des2'] df['mean_active_credit_bureau_v3'] = tmp_merge['des3'] df['sum_active_credit_bureau_v3'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
submission = pd.DataFrame({ "ArticleId": test_df["ArticleId"], "Category": Y_pred_name } )<save_to_csv>
tmp = bureau.groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_credit_bureau'] = tmp_merge['des1'] df['max_credit_bureau'] = tmp_merge['des2'] df['mean_credit_bureau'] = tmp_merge['des3'] df['sum_credit_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
submission.to_csv('submission.csv', index=False )<load_from_csv>
tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['DAYS_CREDIT_ENDDATE'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_endate_bureau'] = tmp_merge['des1'] df['max_endate_bureau'] = tmp_merge['des2'] df['mean_endate_bureau'] = tmp_merge['des3'] df['sum_endate_bureau'] = tmp_merge['des4'] tmp = bureau[(~bureau['DAYS_CREDIT_ENDDATE'].isnull())&(( bureau['CREDIT_TYPE'] == "Consumer credit")) ].sort_values(by=['SK_ID_CURR','DAYS_CREDIT_ENDDATE']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-2 ).reset_index() tmp = tmp[['SK_ID_CURR','DAYS_CREDIT_ENDDATE']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_endate_bureau'] = tmp_merge['des']
Home Credit Default Risk
1,533,277
TRAIN_PATH = os.path.join(".. /input/ai-academy-intermediate-class-competition-1", "BBC News Train.csv") df = pd.read_csv(TRAIN_PATH )<feature_engineering>
tmp = bureau[(bureau['CREDIT_TYPE'] == "Car loan")].groupby(['SK_ID_CURR'])['DAYS_CREDIT_ENDDATE'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_endate_bureau_v1'] = tmp_merge['des1'] df['max_endate_bureau_v1'] = tmp_merge['des2'] df['mean_endate_bureau_v1'] = tmp_merge['des3'] df['sum_endate_bureau_v1'] = tmp_merge['des4'] tmp = bureau[(~bureau['DAYS_CREDIT_ENDDATE'].isnull())&(( bureau['CREDIT_TYPE'] == "Car loan")) ].sort_values(by=['SK_ID_CURR','DAYS_CREDIT_ENDDATE']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-2 ).reset_index() tmp = tmp[['SK_ID_CURR','DAYS_CREDIT_ENDDATE']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_endate_bureau_v1'] = tmp_merge['des']
Home Credit Default Risk
1,533,277
df['category_id'] = df['Category'].factorize() [0] df['category_id'][0:10]<remove_duplicates>
tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['DAYS_CREDIT'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_startdate_bureau'] = tmp_merge['des1'] df['max_startdate_bureau'] = tmp_merge['des2'] df['mean_startdate_bureau'] = tmp_merge['des3'] df['sum_startdate_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
category_id_df = df[['Category', 'category_id']].drop_duplicates().sort_values('category_id' )<define_variables>
tmp = bureau[(~bureau['DAYS_CREDIT_ENDDATE'].isnull())&(( bureau['CREDIT_TYPE'] == "Consumer credit")) ].sort_values(by=['SK_ID_CURR','DAYS_CREDIT_ENDDATE']) tmp = tmp.groupby(['SK_ID_CURR'] ).nth(-2 ).reset_index() tmp = tmp[['SK_ID_CURR','DAYS_ENDDATE_FACT']] tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['1st_endatefact_bureau'] = tmp_merge['des']
Home Credit Default Risk
1,533,277
category_to_id = dict(category_id_df.values) id_to_category = dict(category_id_df[['category_id', 'Category']].values )<count_values>
tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['DAYS_ENDDATE_FACT'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_endatefact_bureau'] = tmp_merge['des1'] df['max_endatefact_bureau'] = tmp_merge['des2'] df['mean_endatefact_bureau'] = tmp_merge['des3'] df['sum_endatefact_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
df.groupby('Category' ).category_id.count() <categorify>
bureau['tmp'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT'] tmp = bureau.groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_deltaendate_bureau'] = tmp_merge['des1'] df['max_deltaendate_bureau'] = tmp_merge['des2'] df['mean_deltaendate_bureau'] = tmp_merge['des3'] df['sum_deltaendate_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english') features = tfidf.fit_transform(df.Text ).toarray() labels = df.category_id <sort_values>
bureau['tmp'] =(bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_CREDIT']) tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_duration_bureau'] = tmp_merge['des1'] df['max_duration_bureau'] = tmp_merge['des2'] df['mean_duration_bureau'] = tmp_merge['des3'] df['sum_duration_bureau'] = tmp_merge['des4'] bureau['tmp'] =(bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_CREDIT']) tmp = bureau[(bureau['CREDIT_TYPE'] != "Consumer credit")].groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_duration_bureau_v1'] = tmp_merge['des1'] df['max_duration_bureau_v1'] = tmp_merge['des2'] df['mean_duration_bureau_v1'] = tmp_merge['des3'] df['sum_duration_bureau_v1'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
sorted(category_to_id.items() )<statistical_test>
bureau['tmp'] =(bureau['DAYS_ENDDATE_FACT'] - bureau['DAYS_CREDIT']) tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_durationfact_bureau'] = tmp_merge['des1'] df['max_durationfact_bureau'] = tmp_merge['des2'] df['mean_durationfact_bureau'] = tmp_merge['des3'] df['sum_durationfact_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
N = 3 for Category, category_id in sorted(category_to_id.items()): features_chi2 = chi2(features, labels == category_id) indices = np.argsort(features_chi2[0]) feature_names = np.array(tfidf.get_feature_names())[indices] unigrams = [v for v in feature_names if len(v.split(' ')) == 1] bigrams = [v for v in feature_names if len(v.split(' ')) == 2] print(" print(".Most correlated unigrams: .{}".format(' .'.join(unigrams[-N:]))) print(".Most correlated bigrams: .{}".format(' .'.join(bigrams[-N:])) )<define_variables>
bureau['tmp'] =(bureau['DAYS_ENDDATE_FACT'] - bureau['DAYS_CREDIT_ENDDATE'])/(bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']) tmp = bureau[(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_sooner_bureau'] = tmp_merge['des1'] df['max_sooner_bureau'] = tmp_merge['des2'] df['mean_sooner_bureau'] = tmp_merge['des3'] df['sum_sooner_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
SAMPLE_SIZE = int(len(features)* 0.3) np.random.seed(0) indices = np.random.choice(range(len(features)) , size=SAMPLE_SIZE, replace=False) projected_features = TSNE(n_components=2, random_state=0 ).fit_transform(features[indices]) <filter>
bureau['tmp'] =(bureau['DAYS_ENDDATE_FACT'] - bureau['DAYS_CREDIT_ENDDATE'])/(bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']) tmp = bureau[(~bureau['CREDIT_TYPE'].isin(['Credit card','Consumer credit'])) ].groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_sooner_bureau_v1'] = tmp_merge['des1'] df['max_sooner_bureau_v1'] = tmp_merge['des2'] df['mean_sooner_bureau_v1'] = tmp_merge['des3'] df['sum_sooner_bureau_v1'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
my_id = 0 projected_features[(labels[indices] == my_id ).values]<choose_model_class>
bureau['tmp'] =(bureau['AMT_CREDIT_SUM'])/(bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']) tmp = bureau[bureau['CREDIT_TYPE'] == "Credit card"].groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_annuity_bureau'] = tmp_merge['des1'] df['max_annuity_bureau'] = tmp_merge['des2'] df['mean_annuity_bureau'] = tmp_merge['des3'] df['sum_annuity_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
models = [ RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0), MultinomialNB() , LogisticRegression(random_state=0), ] <create_dataframe>
bureau['tmp'] =(bureau['AMT_CREDIT_SUM'])/(bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']) tmp = bureau[(bureau['CREDIT_ACTIVE'] == "Active")&(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_annuity_bureau_v1'] = tmp_merge['des1'] df['max_annuity_bureau_v1'] = tmp_merge['des2'] df['mean_annuity_bureau_v1'] = tmp_merge['des3'] df['sum_annuity_bureau_v1'] = tmp_merge['des4'] tmp = bureau[(bureau['CREDIT_ACTIVE'] == "Active")&(bureau['CREDIT_TYPE'] == "Consumer credit")].groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_annuity_bureau_v2'] = tmp_merge['des1'] df['max_annuity_bureau_v2'] = tmp_merge['des2'] df['mean_annuity_bureau_v2'] = tmp_merge['des3'] df['sum_annuity_bureau_v2'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
CV = 5 cv_df = pd.DataFrame(index=range(CV * len(models))) entries = []<find_best_model_class>
bureau['tmp'] =(bureau['AMT_CREDIT_SUM'])/(bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']) tmp = bureau[~bureau['CREDIT_TYPE'].isin(["Credit card","Consumer credit"])].groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_annuity_bureau_v3'] = tmp_merge['des1'] df['max_annuity_bureau_v3'] = tmp_merge['des2'] df['mean_annuity_bureau_v3'] = tmp_merge['des3'] df['sum_annuity_bureau_v3'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
for model in models: model_name = model.__class__.__name__ accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV) for fold_idx, accuracy in enumerate(accuracies): entries.append(( model_name, fold_idx, accuracy))<create_dataframe>
bureau['AMT_CREDIT_SUM_DEBT_v1'] = bureau['AMT_CREDIT_SUM_DEBT'].replace(0, np.nan) tmp = bureau[(bureau['CREDIT_ACTIVE'] == "Active")&(bureau['CREDIT_TYPE'].isin(["Credit card"])) ].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM_DEBT_v1'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_debt_bureau'] = tmp_merge['des1'] df['max_debt_bureau'] = tmp_merge['des2'] df['mean_debt_bureau'] = tmp_merge['des3'] df['sum_debt_bureau'] = tmp_merge['des4'] tmp = bureau[(bureau['CREDIT_ACTIVE'] == "Active")&(bureau['CREDIT_TYPE'].isin(["Consumer credit"])) ].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM_DEBT_v1'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_debt_bureau_v1'] = tmp_merge['des1'] df['max_debt_bureau_v1'] = tmp_merge['des2'] df['mean_debt_bureau_v1'] = tmp_merge['des3'] df['sum_debt_bureau_v1'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'] )<groupby>
bureau['AMT_CREDIT_SUM_LIMIT_v1'] = bureau['AMT_CREDIT_SUM_LIMIT'].replace(0, np.nan) tmp = bureau[(bureau['CREDIT_TYPE'].isin(["Credit card"])) ].groupby(['SK_ID_CURR'])['AMT_CREDIT_SUM_LIMIT_v1'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_limit_bureau'] = tmp_merge['des1'] df['max_limit_bureau'] = tmp_merge['des2'] df['mean_limit_bureau'] = tmp_merge['des3'] df['sum_limit_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
cv_df.groupby('model_name' ).accuracy.mean()<train_model>
bureau['AMT_CREDIT_MAX_OVERDUE_v1'] = bureau['AMT_CREDIT_MAX_OVERDUE'].replace(0,np.nan) tmp = bureau[(bureau['CREDIT_TYPE'].isin(["Consumer credit"])) ].groupby(['SK_ID_CURR'])['AMT_CREDIT_MAX_OVERDUE_v1'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_overdue_bureau'] = tmp_merge['des1'].fillna(0) df['max_overdue_bureau'] = tmp_merge['des2'].fillna(0) df['mean_overdue_bureau'] = tmp_merge['des3'].fillna(0) df['sum_overdue_bureau'] = tmp_merge['des4'].fillna(0 )
Home Credit Default Risk
1,533,277
model = LogisticRegression(random_state=0) X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, df.index, test_size=0.33, random_state=0) model.fit(X_train, y_train) y_pred_proba = model.predict_proba(X_test) y_pred = model.predict(X_test )<compute_test_metric>
bureau['tmp'] = bureau['AMT_CREDIT_SUM_DEBT']/bureau['AMT_CREDIT_SUM'] tmp = bureau.groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_ratio_debt_credit_bureau'] = tmp_merge['des1'] df['max_ratio_debt_credit_bureau'] = tmp_merge['des2'] df['mean_ratio_debt_credit_bureau'] = tmp_merge['des3'] df['sum_ratio_debt_credit_bureau'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
for predicted in category_id_df.category_id: for actual in category_id_df.category_id: if predicted != actual and conf_mat[actual, predicted] >= 2: print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted])) display(df.loc[indices_test[(y_test == actual)&(y_pred == predicted)]]['Text']) print('' )<train_model>
bureau['tmp'] = bureau['AMT_ANNUITY'].fillna(0) tmp = bureau.groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_annuity_bureau_v2'] = tmp_merge['des1'] df['max_annuity_bureau_v2'] = tmp_merge['des2'] df['mean_annuity_bureau_v2'] = tmp_merge['des3'] df['sum_annuity_bureau_v2'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
model.fit(features, labels )<features_selection>
install = pd.read_csv(".. /input/installments_payments.csv" )
Home Credit Default Risk
1,533,277
N = 5 for Category, category_id in sorted(category_to_id.items()): indices = np.argsort(model.coef_[category_id]) feature_names = np.array(tfidf.get_feature_names())[indices] unigrams = [v for v in reversed(feature_names)if len(v.split(' ')) == 1][:N] bigrams = [v for v in reversed(feature_names)if len(v.split(' ')) == 2][:N] print(" print(".Top unigrams: .{}".format(' .'.join(unigrams))) print(".Top bigrams: .{}".format(' .'.join(bigrams)) )<predict_on_test>
tmp = install.groupby(['SK_ID_PREV','SK_ID_CURR'] ).agg({'NUM_INSTALMENT_NUMBER':["count","max"]} ).reset_index() tmp.columns = ['SK_ID_PREV','SK_ID_CURR','count','max'] tmp['delta'] = tmp['count'] - tmp['max'] tmp = tmp.merge(previous[['SK_ID_PREV','CNT_PAYMENT','DAYS_LAST_DUE','NAME_CONTRACT_TYPE']], on=['SK_ID_PREV'], how='left') tmp_1 = tmp.groupby(['SK_ID_CURR'])['delta'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp_1.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp_1, on=['SK_ID_CURR'], how='left') df['min_delta_num_install'] = tmp_merge['des1'] df['max_delta_num_install'] = tmp_merge['des2'] df['mean_delta_num_install'] = tmp_merge['des3'] df['sum_delta_num_install'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
texts = ["Hooli stock price soared after a dip in PiedPiper revenue growth.", "Captain Tsubasa scores a magnificent goal for the Japanese team.", "Merryweather mercenaries are sent on another mission, as government oversight groups call for new sanctions.", "Beyoncé releases a new album, tops the charts in all of south-east Asia!", "You won't guess what the latest trend in data analysis is!"] text_features = tfidf.transform(texts) predictions = model.predict(text_features) for text, predicted in zip(texts, predictions): print('"{}"'.format(text)) print(" - Predicted as: '{}'".format(id_to_category[predicted])) print("" )<load_from_csv>
tmp = install.groupby(['SK_ID_PREV','SK_ID_CURR'] ).agg({'NUM_INSTALMENT_VERSION':["count","max"]} ).reset_index() tmp.columns = ['SK_ID_PREV','SK_ID_CURR','count','max'] tmp = tmp.merge(previous[['SK_ID_PREV','CNT_PAYMENT','DAYS_LAST_DUE','NAME_CONTRACT_TYPE']], on=['SK_ID_PREV'], how='left') tmp_1 = tmp.groupby(['SK_ID_CURR'])['max'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp_1.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp_1, on=['SK_ID_CURR'], how='left') df['min_max_version_install'] = tmp_merge['des1'] df['max_max_version_install'] = tmp_merge['des2'] df['mean_max_version_install'] = tmp_merge['des3'] df['sum_max_version_install'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
TEST_PATH = os.path.join(".. /input/bbc-test", "BBC News Test.csv") test_df = pd.read_csv(TEST_PATH) <predict_on_test>
tmp = install.groupby(['SK_ID_PREV','SK_ID_CURR'] ).agg({'NUM_INSTALMENT_NUMBER':["count","max"]} ).reset_index() tmp.columns = ['SK_ID_PREV','SK_ID_CURR','count','max'] tmp['delta'] = tmp['count']/tmp['max'] tmp = tmp.merge(previous[['SK_ID_PREV','CNT_PAYMENT','DAYS_LAST_DUE','NAME_CONTRACT_TYPE']], on=['SK_ID_PREV'], how='left') tmp_1 = tmp.groupby(['SK_ID_CURR'])['delta'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp_1.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp_1, on=['SK_ID_CURR'], how='left') df['min_ratio_num_install'] = tmp_merge['des1'] df['max_ratio_num_install'] = tmp_merge['des2'] df['mean_ratio_num_install'] = tmp_merge['des3'] df['sum_ratio_num_install'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
test_features = tfidf.transform(test_df.Text.tolist()) Y_pred = model.predict(test_features) Y_pred<define_variables>
tmp = install[['SK_ID_PREV','SK_ID_CURR','NUM_INSTALMENT_NUMBER','AMT_INSTALMENT']].drop_duplicates()
Home Credit Default Risk
1,533,277
Y_pred_name =[] for cat_id in Y_pred : Y_pred_name.append(id_to_category[cat_id] )<create_dataframe>
tmp = tmp.groupby(['SK_ID_PREV','SK_ID_CURR'])['AMT_INSTALMENT'].sum().reset_index() tmp.columns = ['SK_ID_PREV','SK_ID_CURR','need_to_pay']
Home Credit Default Risk
1,533,277
submission = pd.DataFrame({ "ArticleId": test_df["ArticleId"], "Category": Y_pred_name } )<save_to_csv>
tmp_1 = install.groupby(['SK_ID_PREV'])['AMT_PAYMENT'].sum().reset_index() tmp_1.columns = ['SK_ID_PREV','paid']
Home Credit Default Risk
1,533,277
submission.to_csv('submission.csv', index=False )<set_options>
tmp = tmp.merge(tmp_1, on=['SK_ID_PREV'], how='left' )
Home Credit Default Risk
1,533,277
%matplotlib inline try: except ImportError as e: print('scikit-image is too new, ',e) <set_options>
payment_history = tmp payment_history['ratio'] = payment_history['paid']/payment_history['need_to_pay'] payment_history['delta'] = payment_history['need_to_pay'] - payment_history['paid'] payment_history = payment_history.merge(previous[['SK_ID_PREV','AMT_ANNUITY','CNT_PAYMENT','NAME_CONTRACT_TYPE']], \ on = ['SK_ID_PREV'], how='left') payment_history['all_credit'] = payment_history['AMT_ANNUITY'] * payment_history['CNT_PAYMENT'] payment_history['ratio'] = payment_history['paid']/payment_history['all_credit'] payment_history['delta'] = payment_history['all_credit'] - payment_history['paid'] tmp = install.groupby(['SK_ID_PREV'])['NUM_INSTALMENT_VERSION'].mean().reset_index() tmp.columns = ['SK_ID_PREV','mean_version'] payment_history = payment_history.merge(tmp, on=['SK_ID_PREV'], how='left' )
Home Credit Default Risk
1,533,277
use_cuda = True device = torch.device("cuda" if use_cuda else "cpu") torch.manual_seed(42 )<train_model>
tmp = payment_history[payment_history['mean_version'] > 0].groupby(['SK_ID_CURR'])['ratio'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_ratio_paid_install'] = tmp_merge['des1'] df['max_ratio_paid_install'] = tmp_merge['des2'] df['mean_ratio_paid_install'] = tmp_merge['des3'] df['sum_ratio_paid_install'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
class AffMNISTDataset(Dataset): def __init__(self, data_name): with np.load(os.path.join(DATA_ROOT_PATH, '{}.npz'.format(data_name)))as npz_data: self.img_vec = npz_data['img'] self.idx_vec = npz_data['idx'] print('image shape', self.img_vec.shape) print('idx shape', self.idx_vec.shape) label_path = os.path.join(DATA_ROOT_PATH, '{}_labels.csv'.format(data_name)) if os.path.exists(label_path): label_df = pd.read_csv(label_path) self.lab_dict = dict(zip(label_df['idx'], label_df['label'])) else: self.lab_dict = {x:x for x in self.idx_vec} def __len__(self): return len(self.img_vec) def __getitem__(self, idx): out_label = self.lab_dict[self.idx_vec[idx]] out_vec = np.array([out_label], dtype='int') img = self.img_vec[idx].astype('float32') return img, int(out_label )<feature_engineering>
tmp = payment_history[payment_history['mean_version'] > 0].groupby(['SK_ID_CURR'])['delta'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_delta_paid_install'] = tmp_merge['des1'] df['max_delta_paid_install'] = tmp_merge['des2'] df['mean_delta_paid_install'] = tmp_merge['des3'] df['sum_delta_paid_install'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
class SimpleMLP(nn.Module): def __init__(self): super(SimpleMLP, self ).__init__() self.fc1 = nn.Linear(40*40, 512) self.fc2 = nn.Linear(512, 10) def forward(self, x): x = x.view(-1, 40*40) x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=1 )<create_dataframe>
tmp = install.groupby(['SK_ID_PREV','SK_ID_CURR'] ).agg({'NUM_INSTALMENT_NUMBER':["count","max"]} ).reset_index() tmp.columns = ['SK_ID_PREV','SK_ID_CURR','count','max'] tmp['delta'] = tmp['count']/tmp['max'] tmp = tmp.merge(previous[['SK_ID_PREV','CNT_PAYMENT','DAYS_LAST_DUE','NAME_CONTRACT_TYPE']], on=['SK_ID_PREV'], how='left') tmp_1 = tmp.groupby(['SK_ID_CURR'])['max'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp_1.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp_1, on=['SK_ID_CURR'], how='left') df['min_max_num_install'] = tmp_merge['des1'] df['max_max_num_install'] = tmp_merge['des2'] df['mean_max_num_install'] = tmp_merge['des3'] df['sum_max_num_install'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
train_ds = AffMNISTDataset('train') train_loader = DataLoader(train_ds, batch_size=1024, shuffle=True, num_workers=4 )<train_model>
tmp = install.groupby(['SK_ID_PREV'])['AMT_PAYMENT'].max().reset_index() tmp.columns = ['SK_ID_PREV','max_install'] payment_history = payment_history.merge(tmp, on=['SK_ID_PREV'], how='left' )
Home Credit Default Risk
1,533,277
model = SimpleMLP().to(device) model.train()<choose_model_class>
payment_history['tmp'] = payment_history['max_install']/payment_history['AMT_ANNUITY'] tmp = payment_history[payment_history['mean_version'] > 0].groupby(['SK_ID_CURR'])['tmp']
Home Credit Default Risk
1,533,277
optimizer = optim.SGD(model.parameters() , lr=1e-3 )<train_model>
tmp = install.groupby(['SK_ID_PREV'])['NUM_INSTALMENT_NUMBER'].max().reset_index() tmp.columns = ['SK_ID_PREV','max_num_install'] payment_history = payment_history.merge(tmp, on=['SK_ID_PREV'], how='left' )
Home Credit Default Risk
1,533,277
log_interval = 100 for epoch in range(1): for batch_idx,(data, target)in enumerate(train_loader): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() if batch_idx % log_interval == 0: print('Train Epoch: {} [{}/{}({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100.* batch_idx / len(train_loader), loss.item()))<create_dataframe>
tmp = install[install['AMT_INSTALMENT'] > install['AMT_PAYMENT']] tmp = tmp.groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_small_payment'] = tmp_merge['des'].fillna(0) for i in [0, 5, 10, 15, 20, 25, 30, 40, 50, 60]: print(i) tmp = install[(install['DAYS_ENTRY_PAYMENT'] - install['DAYS_INSTALMENT'])> i] tmp = tmp.groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_late_payment_' + str(i)] = tmp_merge['des'].fillna(0)
Home Credit Default Risk
1,533,277
test_ds = AffMNISTDataset('test') test_loader = DataLoader(test_ds, batch_size=1024, shuffle=True, num_workers=4 )<categorify>
install['tmp'] = install['AMT_PAYMENT']/install['AMT_INSTALMENT'] for i in range(10): print(i) tmp = install[(install['tmp'] > i/10)&(install['tmp'] <(( i+1)/10)) ] tmp = tmp.groupby(['SK_ID_CURR'])['SK_ID_PREV'].count().reset_index() tmp.columns = ['SK_ID_CURR','des'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['count_ratio_payment_' + str(i)] = tmp_merge['des'].fillna(0)
Home Credit Default Risk
1,533,277
target_out = [] pred_out = [] for batch_idx,(data, target_idx)in enumerate(test_loader): data = data.to(device) target_idx = target_idx.to('cpu' ).numpy() output = model(data) pred = output.to('cpu' ).data.max(1)[1].numpy() target_out += [target_idx] pred_out += [pred]<save_to_csv>
tmp = install.groupby(['SK_ID_PREV','NUM_INSTALMENT_NUMBER'])['DAYS_INSTALMENT'].count().reset_index() tmp = tmp[tmp['DAYS_INSTALMENT'] > 1] tmp.columns = ['SK_ID_PREV','NUM_INSTALMENT_NUMBER','count_dup'] install = install.merge(tmp, on = ['SK_ID_PREV','NUM_INSTALMENT_NUMBER'], how='left') dup_install = install[install['count_dup'] > 1] dup_install.reset_index(drop=True, inplace = True)
Home Credit Default Risk
1,533,277
pred_df = pd.DataFrame({'idx': np.concatenate(target_out, 0), 'label': np.concatenate(pred_out, 0)}) pred_df.to_csv('mlp_predictions.csv', index=False) pred_df.sample(5 )<set_options>
tmp = install[(install['AMT_PAYMENT'] < install['AMT_INSTALMENT'])&(install['DAYS_ENTRY_PAYMENT'] < install['DAYS_INSTALMENT'])] tmp['ratio'] = tmp['AMT_PAYMENT']/tmp['AMT_INSTALMENT'] tmp = dup_install.groupby(['SK_ID_CURR'])['AMT_PAYMENT'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp_1.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp_1, on=['SK_ID_CURR'], how='left') df['min_special_install'] = tmp_merge['des1'] df['max_special_install'] = tmp_merge['des2'] df['mean_special_install'] = tmp_merge['des3'] df['sum_special_install'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
sns.set_context("paper") print(tf.__version__ )<load_from_csv>
dup_install.sort_values(by=['SK_ID_PREV','NUM_INSTALMENT_NUMBER','DAYS_ENTRY_PAYMENT'] )
Home Credit Default Risk
1,533,277
typetable = pd.read_csv(".. /input/pokemon-type-table/typetable.csv") vals = [] for c1 in typetable.columns[1:]: vals.append(pd.DataFrame({ "idx": typetable["atck"].map(lambda x: "%s-vs-%s-None" %(x, c1)) , "mul": typetable[c1], })) for c2 in typetable.columns[1:]: vals.append(pd.DataFrame({ "idx": typetable["atck"].map(lambda x: "%s-vs-%s-%s" %(x, c1, c2)) , "mul": typetable[c1] * typetable[c2], })) mult = pd.concat(vals ).reset_index().drop(["index"], axis=1) mult = dict(zip(mult.values[:,0], mult.values[:,1])) def multiplier(cat): return mult.get(cat, 0) print(multiplier("Water-vs-Fire-None")) print(multiplier("Water-vs-Fire-Grass")) print(multiplier("Fire-vs-Water-Fire")) print(multiplier("Fire-vs-Grass-Bug")) print(multiplier("None-vs-Grass-Bug"))<train_model>
credit = pd.read_csv(".. /input/credit_card_balance.csv" )
Home Credit Default Risk
1,533,277
class ModelCallback(keras.callbacks.Callback): def set_params(self, params): self.epochs = params["epochs"] def on_epoch_end(self, epoch, epoch_logs): print("\r", "%5s/%-5s " %(epoch + 1, self.epochs), end="") for k, v in epoch_logs.items() : print("%s: %04.4f "%(k, v), end="" )<load_from_csv>
credit['tmp'] = credit['AMT_BALANCE']/credit['AMT_CREDIT_LIMIT_ACTUAL'] tmp = credit.groupby(["SK_ID_CURR","SK_ID_PREV"])['tmp'].max().reset_index() tmp = tmp.groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_max_ratio_balance_limit_credit'] = tmp_merge['des1'] df['max_max_ratio_balance_limit_credit'] = tmp_merge['des2'] df['mean_max_ratio_balance_limit_credit'] = tmp_merge['des3'] df['sum_max_ratio_balance_limit_credit'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
to_underscore = lambda x: re.sub("[^0-9a-zA-Z load_csv = lambda file: pd.read_csv(".. /input/pokemon-challenge-mlh/%s.csv" % file ).rename(to_underscore, axis='columns') to_underscore("First Colum.Value" )<feature_engineering>
credit['tmp'] = credit['AMT_BALANCE']/credit['AMT_CREDIT_LIMIT_ACTUAL'] tmp = credit.groupby(["SK_ID_CURR","SK_ID_PREV"])['tmp'].min().reset_index() tmp = credit.groupby(['SK_ID_CURR'])['tmp'].agg({"returns": [np.min, np.max,np.mean, np.sum]})\ .reset_index() tmp.columns = ['SK_ID_CURR','des1','des2','des3', 'des4'] for df in [train,test]: tmp_merge = df[['SK_ID_CURR']] tmp_merge = tmp_merge.merge(tmp, on=['SK_ID_CURR'], how='left') df['min_min_ratio_balance_limit_credit'] = tmp_merge['des1'] df['max_min_ratio_balance_limit_credit'] = tmp_merge['des2'] df['mean_min_ratio_balance_limit_credit'] = tmp_merge['des3'] df['sum_min_ratio_balance_limit_credit'] = tmp_merge['des4']
Home Credit Default Risk
1,533,277
def df_pokemon() : pokemon = load_csv("pokemon" ).fillna("None") pokemon = pokemon.drop(["name", "generation", "legendary"], axis = 1) pokemon["speed2"] = pokemon["speed"] ** 2 pokemon["speed2"] = pokemon["speed2"] / pokemon["speed2"].max() pokemon["speed"] = pokemon["speed"] / pokemon["speed"].max() pokemon["hp"] = pokemon["hp"] / pokemon["hp"].max() mx = max([pokemon["defense"].max() , pokemon["sp_def"].max() ]) pokemon["defense"] = pokemon["defense"] / mx pokemon["sp_def"] = pokemon["sp_def"] / mx mx = max([pokemon["attack"].max() , pokemon["sp_atk"].max() ]) pokemon["attack"] = pokemon["attack"] / mx pokemon["sp_atk"] = pokemon["sp_atk"] / mx t1 = pd.get_dummies(pokemon["type_1"], prefix='t1_') t2 = pd.get_dummies(pokemon["type_2"], prefix='t2_') ds = [pokemon, t1, t2] pokemon = pd\ .concat(ds,axis=1)\ .rename(to_underscore, axis='columns') return pokemon<merge>
doc = [x for x in train.columns if 'FLAG_DOC' in x] connection = ['FLAG_MOBIL', 'FLAG_EMP_PHONE', 'FLAG_WORK_PHONE', 'FLAG_CONT_MOBILE', 'FLAG_PHONE', 'FLAG_EMAIL',] le = LabelEncoder() categorical = ['CODE_GENDER','FLAG_OWN_CAR','FLAG_OWN_REALTY','NAME_EDUCATION_TYPE', 'NAME_FAMILY_STATUS', 'FLAG_MOBIL','FLAG_EMP_PHONE','FLAG_CONT_MOBILE','FLAG_EMAIL','FLAG_WORK_PHONE', 'OCCUPATION_TYPE','ORGANIZATION_TYPE_v2', 'NAME_INCOME_TYPE','NAME_HOUSING_TYPE','NAME_TYPE_SUITE', 'NAME_CONTRACT_TYPE'] for i in categorical: train[i.lower() ] = le.fit_transform(train[i].fillna("NA")) test[i.lower() ] = le.transform(test[i].fillna("NA")) for df in [train,test]: df['ratio_credit_annity'] = df['AMT_CREDIT']/df['AMT_ANNUITY'] df['ratio_credit_goods'] = df['AMT_CREDIT']/df['AMT_GOODS_PRICE'] df['ratio_min_annuity'] = df['AMT_INCOME_TOTAL']/df['min_amt_annuity'] df['ratio_max_annuity'] = df['AMT_INCOME_TOTAL']/df['max_amt_annuity'] df['ratio_mean_annuity'] = df['AMT_INCOME_TOTAL']/df['mean_amt_annuity'] tmp = df[df['NAME_CONTRACT_TYPE'] == "Revolving loans"].index df['ratio_credit_goods'].iloc[tmp] = np.nan df['ratio_credit_annity'].replace(20, np.nan, inplace = True) df['doc'] = df[doc].mean(axis=1) df['count_null_cash_loans'].replace(np.nan, 0, inplace = True) df['count_null_revolving_loans'].replace(np.nan, 0, inplace = True) df['ratio_cntpay_cur_mean'] = df['ratio_credit_annity']/df['mean_cntpay'] df['ratio_cntpay_cur_min'] = df['ratio_credit_annity']/df['min_cntpay'] df['ratio_cntpay_cur_max'] = df['ratio_credit_annity']/df['max_cntpay'] df['delta_bureau_HC'] = df['max_day_lastdue'] - df['max_endate_bureau'] df['frequency_bureau'] =(df['max_endate_bureau'] - df['min_endate_bureau'])/(df['count_active_bureau_v2']) df['frequency_bureau'].replace(0, np.nan) df['sum_delta_install_credit_curr'] = df['AMT_CREDIT'] + df['sum_delta_paid_install'] df['strenght_income'] = df['sum_delta_install_credit_curr']/df['AMT_INCOME_TOTAL'] df['sum_notfinish'] = df['sum_notfinish_cash_loans'] + 
df['sum_notfinish_consumer_loans'] df['ratio_income_notfinish'] = df['sum_notfinish']/df['AMT_CREDIT'] df['connection'] = df[connection].mean(axis=1) df['living'] = df[['REG_REGION_NOT_LIVE_REGION', 'REG_REGION_NOT_WORK_REGION', 'LIVE_REGION_NOT_WORK_REGION', 'REG_CITY_NOT_LIVE_CITY', 'REG_CITY_NOT_WORK_CITY', 'LIVE_CITY_NOT_WORK_CITY']].mean(axis=1) predictors = ['EXT_SOURCE_1','EXT_SOURCE_2','EXT_SOURCE_3','CNT_CHILDREN', 'AMT_INCOME_TOTAL','AMT_CREDIT','AMT_ANNUITY','AMT_GOODS_PRICE', 'CNT_FAM_MEMBERS', 'DAYS_BIRTH','DAYS_EMPLOYED','DAYS_REGISTRATION', 'DAYS_ID_PUBLISH', 'DAYS_LAST_PHONE_CHANGE', 'OBS_30_CNT_SOCIAL_CIRCLE', 'DEF_30_CNT_SOCIAL_CIRCLE', 'OWN_CAR_AGE','REGION_POPULATION_RELATIVE', 'AMT_REQ_CREDIT_BUREAU_YEAR', 'REGION_RATING_CLIENT','REGION_RATING_CLIENT_W_CITY','TOTALAREA_MODE','APARTMENTS_AVG', 'ratio_credit_annity','ratio_credit_goods','doc','connection', 'min_amt_app','max_amt_app','mean_amt_app', 'min_amt_app_v1','max_amt_app_v1','mean_amt_app_v1', 'min_amt_card','max_amt_card','mean_amt_card', 'min_amt_app_fail','max_amt_app_fail','mean_amt_app_fail', 'min_amt_app_v1_fail','max_amt_app_v1_fail','mean_amt_app_v1_fail', 'min_amt_card_fail','max_amt_card_fail','mean_amt_card_fail', 'min_amt_annuity', 'max_amt_annuity', 'mean_amt_annuity', 'min_cntpay', 'max_cntpay', 'mean_cntpay', 'min_day_decision','max_day_decision', 'min_day_termination','max_day_termination', 'max_day_lastdue', 'min_firstdraw', 'max_firstdraw', 'min_firstdraw_decision', 'max_firstdraw_decision', 'mean_firstdraw_decision', 'min_firstdraw_firstdue', 'max_firstdraw_firstdue', 'mean_firstdraw_firstdue', 'min_firstdraw_lastdue', 'max_firstdraw_lastdue', 'mean_firstdraw_lastdue', 'max_day_decision_fail', 'count_notfinish_revolving_loans','count_notfinish_cash_loans','count_notfinish_consumer_loans', 'min_sooner', 'max_sooner', 'mean_sooner', 'min_seller', 'max_seller', 'mean_seller', 'sum_notfinish', 'min_amt_goods_v1','max_amt_goods_v1','mean_amt_goods_v1', 
'1st_recent_app','1st_recent_credit','1st_recent_card', '1st_recent_app_fail','1st_recent_credit_fail','1st_recent_card_fail', '1st_recent_ratedown','1st_recent_ratedown_fail', '1st_recent_cntpay', 'ratio_cntpay_cur_mean', 'count_cash_loans','count_consumer_loans','count_clear_reason', 'count_middle', 'count_low_normal', 'count_high', 'count_low_action', 'ratio_middle', 'ratio_low_normal', 'ratio_high', 'ratio_low_action', 'count_active_bureau','count_closed_bureau', 'count_active_bureau_v2', 'min_active_credit_bureau','max_active_credit_bureau', 'mean_active_credit_bureau', 'min_closed_credit_bureau','max_closed_credit_bureau', 'mean_closed_credit_bureau', 'min_active_credit_bureau_v1','max_active_credit_bureau_v1', 'mean_active_credit_bureau_v1', 'min_active_credit_bureau_v2','max_active_credit_bureau_v2', 'mean_active_credit_bureau_v2', 'min_active_credit_bureau_v3','max_active_credit_bureau_v3', 'mean_active_credit_bureau_v3', 'max_endate_bureau', '1st_endate_bureau', 'max_endatefact_bureau', 'min_deltaendate_bureau','max_deltaendate_bureau','mean_deltaendate_bureau', 'min_duration_bureau','max_duration_bureau','mean_duration_bureau', 'min_sooner_bureau','max_sooner_bureau','mean_sooner_bureau', 'min_annuity_bureau','max_annuity_bureau','mean_annuity_bureau', 'min_debt_bureau','max_debt_bureau','mean_debt_bureau','sum_debt_bureau', 'min_debt_bureau_v1','max_debt_bureau_v1','mean_debt_bureau_v1', 'min_limit_bureau','max_limit_bureau','mean_limit_bureau', 'min_overdue_bureau','max_overdue_bureau','mean_overdue_bureau', 'min_ratio_debt_credit_bureau','max_ratio_debt_credit_bureau','mean_ratio_debt_credit_bureau', 'min_delta_num_install','max_delta_num_install','mean_delta_num_install', 'min_ratio_num_install','max_ratio_num_install','mean_ratio_num_install', 'min_max_version_install','max_max_version_install','mean_max_version_install', 'min_ratio_paid_install','max_ratio_paid_install','mean_ratio_paid_install', 
'sum_delta_paid_install','sum_delta_install_credit_curr', 'min_max_num_install','max_max_num_install','mean_max_num_install', 'count_small_payment','count_late_payment_0','count_late_payment_10','count_late_payment_20','count_late_payment_30', 'min_max_ratio_balance_limit_credit','max_max_ratio_balance_limit_credit','mean_max_ratio_balance_limit_credit', ] +\ [i.lower() for i in categorical] categorical = [i.lower() for i in categorical]
Home Credit Default Risk
1,533,277
def merge_data(battles, pokemon): battles = battles \ .merge(pokemon.rename(lambda x: "f_%s" % x, axis="columns"), left_on="first_pokemon", right_on="f_ .merge(pokemon.rename(lambda x: "s_%s" % x, axis="columns"), left_on="second_pokemon", right_on="s_ battles["f_t1"] =(battles.f_type_1 + "-vs-" + battles.s_type_1 + "-" + battles.get("s_type_2", "None")).map(multiplier) battles["s_t1"] =(battles.s_type_1 + "-vs-" + battles.f_type_1 + "-" + battles.get("f_type_2", "None")).map(multiplier) battles["f_t2"] =(battles.f_type_2 + "-vs-" + battles.s_type_1 + "-" + battles.get("s_type_2", "None")).map(multiplier) battles["s_t2"] =(battles.s_type_2 + "-vs-" + battles.f_type_1 + "-" + battles.get("f_type_2", "None")).map(multiplier) battles["f_t"] = battles[["f_t1", "f_t2"]].max(axis=1) battles["s_t"] = battles[["s_t1", "s_t2"]].max(axis=1) battles["f_t_min"] = battles[["f_t1", "f_t2"]].min(axis=1) battles["s_t_min"] = battles[["s_t1", "s_t2"]].min(axis=1) battles = battles.drop(["f_type_1", "s_type_1", "f_type_2", "s_type_2", "f_t1", "f_t2", "s_t1", "s_t2"], axis=1) battles = battles\ .sort_values(['battle_number'])\ .reset_index(drop=True)\ .drop(["battle_number", "first_pokemon", "second_pokemon", "f_ return battles<categorify>
NFOLDS = 5 kf = StratifiedKFold(n_splits=NFOLDS, shuffle=True, random_state=2018) pred_test_full = 0 params = { 'boosting': 'gbdt', 'objective': 'binary', 'metric': 'auc', 'learning_rate': 0.01, 'num_leaves': 40, 'max_depth': 7, 'colsample_bytree': 0.15, 'seed': 101 } res = [] idx = 0 for dev_index, val_index in kf.split(train, train['TARGET'].values): dev, valid = train.loc[dev_index,:], train.loc[val_index,:] dtrain = lgb.Dataset(dev[predictors].values, label=dev['TARGET'].values, feature_name=predictors, categorical_feature=categorical ) dvalid = lgb.Dataset(valid[predictors].values, label=valid['TARGET'].values, feature_name=predictors, categorical_feature=categorical ) print("Training the model...") lgb_model = lgb.train(params, dtrain, valid_sets=[dtrain, dvalid], valid_names=['train','valid'], num_boost_round= 30000, early_stopping_rounds=500, verbose_eval=100, feval=None) oof = pd.DataFrame() oof['id'] = valid['SK_ID_CURR'].values oof['target'] = valid['TARGET'].values oof['preds'] = lgb_model.predict(valid[predictors],num_iteration=lgb_model.best_iteration) res.append(oof) pred_test_full += lgb_model.predict(test[predictors],num_iteration=lgb_model.best_iteration) sub = pd.read_csv(".. /input/sample_submission.csv") sub['TARGET'] = pred_test_full/NFOLDS sub.to_csv("sub_lgb.csv", index=False) res = pd.concat(res, ignore_index=True) res.to_csv("oof_lgb.csv", index=False) print(roc_auc_score(res['target'], res['preds']))
Home Credit Default Risk
1,533,277
def params(train, units=[]): X, Xs = to_model(train) Y = train["winner"].values y = keras.utils.to_categorical(Y) assert X.shape[0] == y.shape[0] input_dim = X.shape[1] outuput_dim = 1 if len(y.shape)== 1 else y.shape[1] samples = train.shape[0] print("𝑁𝑖: %s, 𝑁𝑜: %s, 𝑁𝑠: %s" %(input_dim, outuput_dim, samples)) print("units: sum(%s)= %s" %(units, sum(units))) return(input_dim, outuput_dim, units, X, Xs, Y, y )<drop_column>
tmp = install.groupby(['SK_ID_PREV','NUM_INSTALMENT_NUMBER'])['DAYS_INSTALMENT'].count().reset_index() tmp = tmp[tmp['DAYS_INSTALMENT'] > 1] tmp.columns = ['SK_ID_PREV','NUM_INSTALMENT_NUMBER','count_dup'] install = install.merge(tmp, on = ['SK_ID_PREV','NUM_INSTALMENT_NUMBER'], how='left') tmp = install[install['count_dup'] > 1]
Home Credit Default Risk
1,533,277
def to_model(X): if "winner" in X: X = X.drop(["winner"], axis=1) X = X.values return X, X<count_duplicates>
install.drop(['count_dup_x','count_dup_y'], axis=1, inplace = True )
Home Credit Default Risk
1,533,277
def remove_duplicate_battles(battles): return battles\ .groupby(["first_pokemon", "second_pokemon", "winner"])\ .count() \ .reset_index() \ .drop(["battle_number"],axis=1)\ .sample(frac=1)\ .reset_index(drop=True)\ .reset_index() \ .rename(columns={"index": "battle_number"} )<load_from_csv>
tmp.sort_values(by=['SK_ID_PREV','NUM_INSTALMENT_NUMBER','DAYS_ENTRY_PAYMENT'] )
Home Credit Default Risk
1,533,277
battles_values = load_csv("battles" ).values acc100 = dict(zip(map(tuple, battles_values[:, 1:-1]), map(tuple, battles_values[:, -1:])) )<train_model>
tmp['SK_ID_CURR'].value_counts()
Home Credit Default Risk
1,316,642
epochs, validation_split =(600, 0.0) history = model.fit(Xs, y, workers=4, epochs=epochs, verbose=0, validation_split=validation_split, batch_size=512, shuffle=True, callbacks=[ModelCallback() ], initial_epoch=0) plot_history(( history), title="", since=0 )<save_to_csv>
import matplotlib.pyplot as plt import lightgbm as lgb import gc from sklearn.model_selection import KFold, cross_val_score from sklearn.metrics import confusion_matrix,precision_recall_curve,auc,roc_auc_score,roc_curve,recall_score,classification_report from sklearn.preprocessing import LabelEncoder
Home Credit Default Risk
1,316,642
test = load_csv("test") X, Xs = to_model(merge_data(super_battle(load_csv("test")) , pokemon)) prediction = model.predict_classes(Xs) df_submission = pd.DataFrame({"Winner": prediction}, index = test.index.rename("battle_number")) for i, p1, p2 in load_csv("test" ).values: if(p1, p2)in acc100: df_submission.iloc[i] = acc100[(p1, p2)][0] df_submission.to_csv("solution.csv" )<load_pretrained>
app_train = pd.read_csv('.. /input/application_train.csv') print('Training data shape: ', app_train.shape) app_train.head()
Home Credit Default Risk
1,316,642
model.save_weights("cp-{epoch:04d}.ckpt".format(epoch=epochs)) ! mkdir "saved_models" saved_model_path = tf.contrib.saved_model.save_keras_model(model, "./saved_models" )<load_from_csv>
app_test = pd.read_csv('.. /input/application_test.csv') print('Testing data shape: ', app_test.shape) app_test.head()
Home Credit Default Risk
1,316,642
battles = pd.read_csv(".. /input/battles.csv") pokemon = pd.read_csv(".. /input/pokemon.csv") pokemon.describe()<feature_engineering>
app_train['TARGET'].value_counts() print('The proportion of label 1 is %.2f' %(sum(app_train['TARGET']==1)/app_train.shape[0]*100), '%' )
Home Credit Default Risk
1,316,642
def normalizeColumns(columns, dataset):
    """Z-score-standardise the listed *columns* of *dataset* in place.

    Each named column is replaced by (x - mean) / std, using pandas'
    default sample std (ddof=1). Mutates *dataset*; returns None.
    """
    for name in columns:
        col = dataset[name]
        dataset[name] = (col - col.mean()) / col.std()
def missing_values_table(df):
    """Summarise missing values per column of *df*.

    Returns a DataFrame indexed by column name with 'Missing Values'
    (count) and 'Percentage' columns, limited to columns that have at
    least one missing value, sorted by percentage descending and
    rounded to one decimal. Prints the total and affected column counts.
    """
    # Compute the per-column null counts once and derive the percentage
    # from it (the original scanned the whole frame twice).
    mis_val = df.isnull().sum()
    mis_val_percent = mis_val * 100 / df.shape[0]
    mis_val_table = pd.concat([mis_val, mis_val_percent], axis = 1)
    mis_val_table_rename_columns = mis_val_table.rename(columns = {0: 'Missing Values', 1: 'Percentage'})
    mis_val_table_rename_columns = mis_val_table_rename_columns[
        mis_val_table_rename_columns.iloc[:, 1]!=0].sort_values('Percentage', ascending=False).round(1)
    print('The total dataframe has ' + str(df.shape[1])+ ' columns')
    print('There are ' + str(mis_val_table_rename_columns.shape[0])+ ' columns')
    return mis_val_table_rename_columns
Home Credit Default Risk
1,316,642
# Feature preparation for the pokemon frame: fill missing types with the
# sentinel "noType", one-hot encode Type 1/Type 2 (t1_/t2_ prefixes),
# drop the raw type/name/generation/legendary columns, then z-score the
# six base stats in place via normalizeColumns.
pokemon.fillna("noType", inplace=True) pokemon = pd.concat([ pokemon, pd.get_dummies(pokemon["Type 1"], prefix="t1"), pd.get_dummies(pokemon["Type 2"], prefix="t2"), ], axis=1) pokemon.drop(["Type 1", "Type 2", "Name", "Generation", "Legendary"], axis=1, inplace=True) normalizeColumns(["HP", "Sp.Atk", "Sp.Def", "Attack", "Defense", "Speed"], pokemon )
missing_values = missing_values_table(app_train) missing_values.head(20 )
Home Credit Default Risk
1,316,642
# NOTE(review): this definition appears truncated by the export — the
# `right_on="` string literals and the `drop(["` list are cut off
# (presumably the "#" Pokédex-id column name was lost when the text was
# extracted). Intent seems to be: merge pokemon stats onto both battle
# sides, sort by battle_number, drop the join-key columns, and sort the
# columns alphabetically — confirm against the original notebook before
# running. Code preserved byte-for-byte below.
def mergePokemonStats(battles, pokemon): data = battles \ .merge(pokemon, left_on="First_pokemon", right_on=" .merge(pokemon, left_on="Second_pokemon", right_on=" .sort_values(['battle_number'])\ .drop([" data = data.reindex(sorted(data.columns), axis=1) return data
app_train.dtypes.value_counts()
Home Credit Default Risk
1,316,642
data = mergePokemonStats(battles, pokemon) data.head()<prepare_x_and_y>
app_train.select_dtypes('object' ).apply(pd.Series.nunique, axis=0 )
Home Credit Default Risk
1,316,642
y = data["Winner"].values x = data.drop(["Winner"],axis=1 ).values<import_modules>
app_test.dtypes.value_counts()
Home Credit Default Risk
1,316,642
import tensorflow as tf import sklearn.model_selection import sklearn.ensemble import sklearn.metrics<choose_model_class>
app_train = pd.get_dummies(app_train) app_test = pd.get_dummies(app_test) print('Training Features shape: ', app_train.shape) print('Testing Features shape: ', app_test.shape )
Home Credit Default Risk
1,316,642
mlp = tf.keras.Sequential([ tf.keras.layers.Dense(310, activation=tf.nn.relu), tf.keras.layers.GaussianNoise(0.17), tf.keras.layers.Dropout(0.50), tf.keras.layers.Dense(310, activation=tf.nn.relu), tf.keras.layers.Dropout(0.50), tf.keras.layers.Dense(1, activation=tf.nn.sigmoid) ] )<choose_model_class>
train_labels = app_train['TARGET'] app_train, app_test = app_train.align(app_test, join = 'inner', axis = 1) app_train['TARGET'] = train_labels print('Training Features shape: ', app_train.shape) print('Testing Features shape: ', app_test.shape )
Home Credit Default Risk
1,316,642
mlp.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"] )<train_model>
# The sentinel 365243 in DAYS_EMPLOYED marks "not employed"-style
# anomalies; flag it in a boolean DAYS_EMPLOYED_ANOM column and replace
# the sentinel with NaN so downstream stats/models don't treat it as a
# real duration. Applied identically to train and test.
app_train['DAYS_EMPLOYED_ANOM'] = app_train['DAYS_EMPLOYED'] == 365243 app_train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True) app_test['DAYS_EMPLOYED_ANOM'] = app_test['DAYS_EMPLOYED'] == 365243 app_test['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True) print('Training Features shape: ', app_train.shape) print('Testing Features shape: ', app_test.shape )
Home Credit Default Risk
1,316,642
history = mlp.fit( x, y, epochs=350, workers=4, batch_size=512, shuffle=True, validation_split=0.1 )<split>
# Hand-crafted domain-knowledge ratios, built identically on copies of
# train and test: credit/income, annuity/income, annuity/credit
# (payment "term" proxy), and employed-days/age. DAYS_* columns are
# negative day counts, so the last ratio is positive.
app_train_domain = app_train.copy() app_test_domain = app_test.copy() app_train_domain['CREDIT_INCOME_PERCENT'] = app_train_domain['AMT_CREDIT'] / app_train_domain['AMT_INCOME_TOTAL'] app_train_domain['ANNUITY_INCOME_PERCENT'] = app_train_domain['AMT_ANNUITY'] / app_train_domain['AMT_INCOME_TOTAL'] app_train_domain['CREDIT_TERM'] = app_train_domain['AMT_ANNUITY'] / app_train_domain['AMT_CREDIT'] app_train_domain['DAYS_EMPLOYED_PERCENT'] = app_train_domain['DAYS_EMPLOYED'] / app_train_domain['DAYS_BIRTH'] app_test_domain['CREDIT_INCOME_PERCENT'] = app_test_domain['AMT_CREDIT'] / app_test_domain['AMT_INCOME_TOTAL'] app_test_domain['ANNUITY_INCOME_PERCENT'] = app_test_domain['AMT_ANNUITY'] / app_test_domain['AMT_INCOME_TOTAL'] app_test_domain['CREDIT_TERM'] = app_test_domain['AMT_ANNUITY'] / app_test_domain['AMT_CREDIT'] app_test_domain['DAYS_EMPLOYED_PERCENT'] = app_test_domain['DAYS_EMPLOYED'] / app_test_domain['DAYS_BIRTH'] print('Domain Training Features shape: ', app_train_domain.shape) print('Domain Testing Features shape: ', app_test_domain.shape )
Home Credit Default Risk
1,316,642
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.1) gradientBoosting = sklearn.ensemble.GradientBoostingClassifier( n_estimators=1000, max_depth=10, verbose = 1 ) gradientBoosting.fit(x_train, y_train )<compute_test_metric>
bureau = pd.read_csv('.. /input/bureau.csv') bureau.head()
Home Credit Default Risk
1,316,642
sklearn.metrics.accuracy_score(y_test, gradientBoosting.predict(x_test), normalize=True, sample_weight=None )<load_from_csv>
previous_loan_counts = bureau.groupby('SK_ID_CURR', as_index=False)['SK_ID_BUREAU'].count().rename(columns = {'SK_ID_BUREAU': 'previous_loan_counts'}) previous_loan_counts.head()
Home Credit Default Risk
1,316,642
submission = pd.read_csv(".. /input/test.csv") test = mergePokemonStats(submission, pokemon )<predict_on_test>
def agg_numeric(df, group_var, df_name):
    """Aggregate the numeric columns of *df* per *group_var*.

    Any other ``SK_ID*`` id column is dropped first so ids are not
    averaged as numbers. For every remaining numeric column, computes
    count/mean/max/min/sum per group and returns a flat DataFrame whose
    columns are named ``<df_name>_<col>_<stat>`` (plus *group_var*).
    """
    for col in df:
        if col != group_var and 'SK_ID' in col:
            df = df.drop(columns = col)

    group_ids = df[group_var]
    numeric_df = df.select_dtypes('number')
    numeric_df[group_var] = group_ids

    agg = numeric_df.groupby(group_var).agg(['count', 'mean', 'max', 'min', 'sum']).reset_index()

    # BUG FIX: build the flat names from the MultiIndex tuples in their
    # actual order. The original iterated agg.columns.levels, which holds
    # the *sorted unique* level values and can mislabel columns when that
    # order differs from the real column order.
    columns = [group_var]
    for var, stat in agg.columns[1:]:
        columns.append('%s_%s_%s' % (df_name, var, stat))
    agg.columns = columns
    return agg

bureau_agg = agg_numeric(bureau.drop(columns = ['SK_ID_BUREAU']), group_var = 'SK_ID_CURR', df_name = 'bureau')
bureau_agg.head()
Home Credit Default Risk
1,316,642
# Blend the two models' win probabilities: full weight on the MLP plus a
# 0.10 weight on the gradient-boosting positive-class probability,
# renormalised by 1.10, then thresholded at 0.5 to hard 0/1 labels.
predictionMLP = np.transpose(mlp.predict(test.values)) predictionGB = gradientBoosting.predict_proba(test.values) predictionGB = np.array([predict[1] for predict in predictionGB]) prediction =(( predictionMLP)+(0.10*predictionGB)) /1.10 prediction[prediction <= 0.5] = 0 prediction[prediction > 0.5] = 1
def count_categorical(df, group_var, df_name):
    """Count the levels of every categorical column per *group_var*.

    One-hot encodes each object column, then per group reports the raw
    occurrence count (sum of dummies -> ``*_count``) and the normalised
    share (mean of dummies -> ``*_count_norm``). Returns a DataFrame
    indexed by *group_var* with flat ``<df_name>_<level>_<stat>`` names.
    """
    categorical = pd.get_dummies(df.select_dtypes('object'))
    categorical[group_var] = df[group_var]
    categorical = categorical.groupby(group_var).agg(['sum', 'mean'])

    # BUG FIX: derive names from the MultiIndex tuples in their actual
    # order and map each stat explicitly (sum -> count, mean ->
    # count_norm). The original walked .columns.levels (sorted uniques)
    # and paired stats positionally, which can mislabel columns.
    stat_name = {'sum': 'count', 'mean': 'count_norm'}
    categorical.columns = [
        '%s_%s_%s' % (df_name, var, stat_name[stat])
        for var, stat in categorical.columns
    ]
    return categorical

bureau_counts = count_categorical(bureau, group_var = 'SK_ID_CURR', df_name = 'bureau')
bureau_counts.head()
Home Credit Default Risk
1,316,642
submission["Winner"] = prediction[0].astype("int") submission.drop(["First_pokemon", "Second_pokemon"], axis=1, inplace=True) submission.head()<save_to_csv>
train = app_train_domain.merge(previous_loan_counts, on = 'SK_ID_CURR', how = 'left') train['previous_loan_counts'] = train['previous_loan_counts'].fillna(0) test = app_test_domain.merge(previous_loan_counts, on = 'SK_ID_CURR', how = 'left') test['previous_loan_counts'] = test['previous_loan_counts'].fillna(0 )
Home Credit Default Risk
1,316,642
submission.to_csv("submission.csv", index=False )<save_to_csv>
train = train.merge(bureau_agg, on = 'SK_ID_CURR', how = 'left') test = test.merge(bureau_agg, on = 'SK_ID_CURR', how = 'left' )
Home Credit Default Risk
1,316,642
submission.to_csv("submission.csv", index=False )<load_from_csv>
train = train.merge(bureau_counts, on = 'SK_ID_CURR', how = 'left') test = test.merge(bureau_counts, on = 'SK_ID_CURR', how = 'left' )
Home Credit Default Risk
1,316,642
# NOTE(review): the merge key string literals here are truncated by the
# export (`right_on='` and the drop list are cut off — most likely the
# '#' Pokédex-id column name was lost). Intent: load battles/pokemon/
# test CSVs, fill missing types with 'None', cast Legendary to int, join
# pokemon stats onto both battle sides, sort by battle_number, drop the
# join keys, and keep feature columns from position 3 on. Recover the
# original keys before running; code preserved byte-for-byte below.
battles = pd.read_csv('.. /input/battles.csv') pokemon = pd.read_csv('.. /input/pokemon.csv') test = pd.read_csv('.. /input/test.csv') pokemon = pokemon.drop(columns='Name', errors='ignore') pokemon.loc[:, 'Type 1'] = pokemon.loc[:, 'Type 1'].fillna('None') pokemon.loc[:, 'Type 2'] = pokemon.loc[:, 'Type 2'].fillna('None') pokemon.Legendary = pokemon.Legendary.astype(int) data = pd.merge(battles, pokemon, left_on='First_pokemon' ,right_on=' data = pd.merge(data, pokemon, left_on='Second_pokemon',right_on=' data = data.sort_values(['battle_number']) data = data.drop(columns=[' data = data.iloc[:, 3:] data.head()
print('Before align train.shape: ', train.shape) print('Before align test.shape: ', test.shape) train_labels = train['TARGET'] train, test = train.align(test, join = 'inner', axis = 1) train['TARGET'] = train_labels print('After align train.shape: ', train.shape) print('After align test.shape: ', test.shape )
Home Credit Default Risk
1,316,642
tout = pd.merge(test, pokemon, left_on='First_pokemon' ,right_on=' tout = pd.merge(tout, pokemon, left_on='Second_pokemon',right_on=' tout = tout.sort_values(['battle_number']) tout = tout.drop(columns=[' tout = tout.iloc[:, 3:] tout.head()<split>
bureau_balance = pd.read_csv('.. /input/bureau_balance.csv') bureau_balance.head()
Home Credit Default Risk
1,316,642
# fastai tabular setup: hold out 1% of battles as validation, declare
# the categorical columns (types/generation/legendary for both sides),
# Categorify+Normalize the rest, build a DataBunch with the test frame
# attached, create a 64-32-16-8 tabular learner, and run the LR finder
# to pick a learning rate from the plotted loss curve.
TEST_SIZE = 0.01 tr_idx, ts_idx = train_test_split(range(len(battles)) , test_size=TEST_SIZE) cat_names = ['Generation_A', 'Generation_B', 'Type 1_A', 'Type 1_B', 'Type 2_A', 'Type 2_B', 'Legendary_A', 'Legendary_B'] procs = [Categorify, Normalize] db = TabularDataBunch.from_df(path='.', df=data, dep_var='Winner', valid_idx=ts_idx, procs=procs, test_df=tout, cat_names=cat_names, bs=1024) learn = tabular_learner(db, layers=[64, 32, 16, 8], metrics=accuracy) learn.lr_find() learn.recorder.plot()
bureau_balance_agg = agg_numeric(bureau_balance, group_var = 'SK_ID_BUREAU', df_name = 'bureau_balance') bureau_balance_agg.head()
Home Credit Default Risk
1,316,642
learn.fit_one_cycle(50, 1e-1 )<prepare_output>
bureau_balance_counts = count_categorical(bureau_balance, group_var = 'SK_ID_BUREAU', df_name = 'bureau_balance') bureau_balance_counts.head()
Home Credit Default Risk
1,316,642
preds, _ = learn.get_preds(ds_type=DatasetType.Test) preds = np.argmax(preds, axis=1 ).numpy() submission = pd.DataFrame(test.iloc[:, 0]) submission['Winner'] = preds<save_to_csv>
bureau_by_loan = bureau_balance_agg.merge(bureau_balance_counts, right_index = True, left_on = 'SK_ID_BUREAU', how = 'outer' )
Home Credit Default Risk
1,316,642
print(submission.head()) submission.to_csv('./submission.csv', index=False )<save_to_csv>
bureau_by_loan = bureau_by_loan.merge(bureau[['SK_ID_BUREAU', 'SK_ID_CURR']], on = 'SK_ID_BUREAU', how = 'left' )
Home Credit Default Risk
1,316,642
def create_download_link(df, title = "Download CSV file", filename = "data.csv"):
    """Return an IPython HTML download link for *df* serialised as CSV.

    The frame is base64-encoded into a data: URI so the notebook can
    offer it for download without writing a server-side file.
    """
    csv = df.to_csv(index=False)
    b64 = base64.b64encode(csv.encode())
    payload = b64.decode()
    # BUG FIX: the download attribute had lost its {filename} placeholder,
    # so .format() received a 'filename' argument the template never used
    # and the link carried no proper file name.
    html = '<a download="{filename}" href="data:text/csv;base64,{payload}" target="_blank">{title}</a>'
    html = html.format(payload=payload, title=title, filename=filename)
    return HTML(html)


create_download_link(submission)
bureau_balance_by_client = agg_numeric(bureau_by_loan.drop(columns = ['SK_ID_BUREAU']), group_var = 'SK_ID_CURR', df_name = 'client') bureau_balance_by_client.head()
Home Credit Default Risk
1,316,642
%matplotlib inline<load_from_csv>
train = train.merge(bureau_balance_by_client, on = 'SK_ID_CURR', how = 'left') test = test.merge(bureau_balance_by_client, on = 'SK_ID_CURR', how = 'left' )
Home Credit Default Risk
1,316,642
train = pd.read_csv('.. /input/train.csv') test = pd.read_csv('.. /input/test.csv' )<load_from_csv>
print('Before align train.shape: ', train.shape) print('Before align test.shape: ', test.shape) train_labels = train['TARGET'] train, test = train.align(test, join = 'inner', axis = 1) train['TARGET'] = train_labels print('After align train.shape: ', train.shape) print('After align test.shape: ', test.shape )
Home Credit Default Risk