column (dtype): min – max
kernel_id (int64): 24.2k – 23.3M
prompt (stringlengths): 8 – 1.85M
completetion (stringlengths): 1 – 182k
comp_name (stringlengths): 5 – 57
18,348,927
model.save('./sentiment-analysis-on-movie-reviews/Movie_sentiment_analysis_model' )<load_from_csv>
df['CREDIT_TO_ANNUITY_RATIO'] = df['AMT_CREDIT'] / df['AMT_ANNUITY'] df['CREDIT_TO_GOODS_RATIO'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE'] df['ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL'] df['CREDIT_TO_INCOME_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL'] df['INCOME_TO_EMPLOYED_RATIO'] = df['AMT_INCOME_TOTAL'] / df['DAYS_EMPLOYED'] df['INCOME_TO_BIRTH_RATIO'] = df['AMT_INCOME_TOTAL'] / df['DAYS_BIRTH'] df['EMPLOYED_TO_BIRTH_RATIO'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH'] df['ID_TO_BIRTH_RATIO'] = df['DAYS_ID_PUBLISH'] / df['DAYS_BIRTH'] df['CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH'] df['CAR_TO_EMPLOYED_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED'] df['PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
Home Credit Default Risk
18,348,927
test=pd.read_table("/kaggle/working/sentiment-analysis-on-movie-reviews/test.tsv",sep='\t' )<data_type_conversions>
def do_mean(df, group_cols, counted, agg_name): gp = df[group_cols + [counted]].groupby(group_cols)[counted].mean().reset_index().rename( columns={counted: agg_name}) df = df.merge(gp, on=group_cols, how='left') del gp gc.collect() return df
Home Credit Default Risk
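The do_mean helper above (and its do_median/do_std/do_sum siblings in the rows below) computes a per-group statistic and merges it back onto every row. The same effect can be had with a groupby transform, which skips the intermediate frame, the merge, and the gc.collect(). A minimal runnable sketch, assuming only pandas; the do_agg name and toy frame are illustrative, not from the kernel:

import pandas as pd

def do_agg(df, group_cols, counted, agg_name, func='mean'):
    # transform broadcasts the per-group statistic back onto the original rows
    df[agg_name] = df.groupby(group_cols)[counted].transform(func)
    return df

toy = pd.DataFrame({'g': ['a', 'a', 'b'], 'v': [1.0, 3.0, 5.0]})
toy = do_agg(toy, ['g'], 'v', 'GROUP_V_MEAN')  # GROUP_V_MEAN: 2.0, 2.0, 5.0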
18,348,927
x_test = tokenizer( text=test.Phrase.to_list() , add_special_tokens=True, max_length=max_token_length, truncation=True, padding=True, return_tensors='tf', return_token_type_ids = False, return_attention_mask = True, verbose = True )<create_dataframe>
def do_median(df, group_cols, counted, agg_name): gp = df[group_cols + [counted]].groupby(group_cols)[counted].median().reset_index().rename( columns={counted: agg_name}) df = df.merge(gp, on=group_cols, how='left') del gp gc.collect() return df
Home Credit Default Risk
18,348,927
test_items=tf.data.Dataset.from_tensor_slices(( x_test['input_ids'],x_test['attention_mask']))<categorify>
def do_std(df, group_cols, counted, agg_name): gp = df[group_cols + [counted]].groupby(group_cols)[counted].std().reset_index().rename( columns={counted: agg_name}) df = df.merge(gp, on=group_cols, how='left') del gp gc.collect() return df
Home Credit Default Risk
18,348,927
def map_func(input_ids, masks): return {'input_ids': input_ids, 'attention_mask': masks} test_items = test_items.map(map_func) test_items = test_items.batch(32 )<predict_on_test>
def do_sum(df, group_cols, counted, agg_name): gp = df[group_cols + [counted]].groupby(group_cols)[counted].sum().reset_index().rename( columns={counted: agg_name}) df = df.merge(gp, on=group_cols, how='left') del gp gc.collect() return df
Home Credit Default Risk
18,348,927
predictions=model.predict(test_items ).argmax(axis=-1 )<save_to_csv>
group = ['ORGANIZATION_TYPE', 'NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'AGE_RANGE', 'CODE_GENDER'] df = do_median(df, group, 'EXT_SOURCES_MEAN', 'GROUP_EXT_SOURCES_MEDIAN') df = do_std(df, group, 'EXT_SOURCES_MEAN', 'GROUP_EXT_SOURCES_STD') df = do_mean(df, group, 'AMT_INCOME_TOTAL', 'GROUP_INCOME_MEAN') df = do_std(df, group, 'AMT_INCOME_TOTAL', 'GROUP_INCOME_STD') df = do_mean(df, group, 'CREDIT_TO_ANNUITY_RATIO', 'GROUP_CREDIT_TO_ANNUITY_MEAN') df = do_std(df, group, 'CREDIT_TO_ANNUITY_RATIO', 'GROUP_CREDIT_TO_ANNUITY_STD') df = do_mean(df, group, 'AMT_CREDIT', 'GROUP_CREDIT_MEAN') df = do_mean(df, group, 'AMT_ANNUITY', 'GROUP_ANNUITY_MEAN') df = do_std(df, group, 'AMT_ANNUITY', 'GROUP_ANNUITY_STD' )
Home Credit Default Risk
18,348,927
submission = pd.DataFrame() submission['PhraseId'] = test['PhraseId'] submission['Sentiment'] = predictions submission.to_csv("submission.csv", index=False) submission.head()<load_from_csv>
def label_encoder(df, categorical_columns=None): if not categorical_columns: categorical_columns = [col for col in df.columns if df[col].dtype == 'object'] for col in categorical_columns: df[col], uniques = pd.factorize(df[col]) return df, categorical_columns
Home Credit Default Risk
18,348,927
train_data = pd.read_csv('../input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t') train_data.head()<load_from_csv>
def drop_application_columns(df): drop_list = [ 'CNT_CHILDREN', 'CNT_FAM_MEMBERS', 'HOUR_APPR_PROCESS_START', 'FLAG_EMP_PHONE', 'FLAG_MOBIL', 'FLAG_CONT_MOBILE', 'FLAG_EMAIL', 'FLAG_PHONE', 'FLAG_OWN_REALTY', 'REG_REGION_NOT_LIVE_REGION', 'REG_REGION_NOT_WORK_REGION', 'REG_CITY_NOT_WORK_CITY', 'OBS_30_CNT_SOCIAL_CIRCLE', 'OBS_60_CNT_SOCIAL_CIRCLE', 'AMT_REQ_CREDIT_BUREAU_DAY', 'AMT_REQ_CREDIT_BUREAU_MON', 'AMT_REQ_CREDIT_BUREAU_YEAR', 'COMMONAREA_MODE', 'NONLIVINGAREA_MODE', 'ELEVATORS_MODE', 'NONLIVINGAREA_AVG', 'FLOORSMIN_MEDI', 'LANDAREA_MODE', 'NONLIVINGAREA_MEDI', 'LIVINGAPARTMENTS_MODE', 'FLOORSMIN_AVG', 'LANDAREA_AVG', 'FLOORSMIN_MODE', 'LANDAREA_MEDI', 'COMMONAREA_MEDI', 'YEARS_BUILD_AVG', 'COMMONAREA_AVG', 'BASEMENTAREA_AVG', 'BASEMENTAREA_MODE', 'NONLIVINGAPARTMENTS_MEDI', 'BASEMENTAREA_MEDI', 'LIVINGAPARTMENTS_AVG', 'ELEVATORS_AVG', 'YEARS_BUILD_MEDI', 'ENTRANCES_MODE', 'NONLIVINGAPARTMENTS_MODE', 'LIVINGAREA_MODE', 'LIVINGAPARTMENTS_MEDI', 'YEARS_BUILD_MODE', 'YEARS_BEGINEXPLUATATION_AVG', 'ELEVATORS_MEDI', 'LIVINGAREA_MEDI', 'YEARS_BEGINEXPLUATATION_MODE', 'NONLIVINGAPARTMENTS_AVG', 'HOUSETYPE_MODE', 'FONDKAPREMONT_MODE', 'EMERGENCYSTATE_MODE' ] for doc_num in [2,4,5,6,7,9,10,11,12,13,14,15,16,17,19,20,21]: drop_list.append('FLAG_DOCUMENT_{}'.format(doc_num)) df.drop(drop_list, axis=1, inplace=True) return df
Home Credit Default Risk
18,348,927
test_data = pd.read_csv('../input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t') test_data.head()<import_modules>
df, le_encoded_cols = label_encoder(df, None) df = drop_application_columns(df )
Home Credit Default Risk
18,348,927
import matplotlib.pyplot as plt import tensorflow as tf<import_modules>
df = pd.get_dummies(df )
Home Credit Default Risk
18,348,927
from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences<set_options>
bureau = pd.read_csv(os.path.join(DATA_DIRECTORY, 'bureau.csv'))
Home Credit Default Risk
18,348,927
print("TF version: ", tf.__version__) if tf.__version__ < "2.0.0": tf.enable_eager_execution() print("Eager execution enabled.") else: print("Eager execution enabled by default.") if tf.test.gpu_device_name() : print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) else: print("Please install GPU version of TF" )<count_unique_values>
bureau['CREDIT_DURATION'] = -bureau['DAYS_CREDIT'] + bureau['DAYS_CREDIT_ENDDATE'] bureau['ENDDATE_DIF'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT'] bureau['DEBT_PERCENTAGE'] = bureau['AMT_CREDIT_SUM'] / bureau['AMT_CREDIT_SUM_DEBT'] bureau['DEBT_CREDIT_DIFF'] = bureau['AMT_CREDIT_SUM'] - bureau['AMT_CREDIT_SUM_DEBT'] bureau['CREDIT_TO_ANNUITY_RATIO'] = bureau['AMT_CREDIT_SUM'] / bureau['AMT_ANNUITY']
Home Credit Default Risk
18,348,927
print(train_data['Sentiment'].unique()) train_data['Sentiment'].nunique()<count_values>
def one_hot_encoder(df, categorical_columns=None, nan_as_category=True): original_columns = list(df.columns) if not categorical_columns: categorical_columns = [col for col in df.columns if df[col].dtype == 'object'] df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category) categorical_columns = [c for c in df.columns if c not in original_columns] return df, categorical_columns
Home Credit Default Risk
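For readability, the one_hot_encoder cell above reflowed into runnable multi-line form, with a toy usage example appended (the toy frame and its column values are illustrative):

import pandas as pd

def one_hot_encoder(df, categorical_columns=None, nan_as_category=True):
    # remember the original columns so the newly created dummies can be reported
    original_columns = list(df.columns)
    if not categorical_columns:
        categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
    df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
    categorical_columns = [c for c in df.columns if c not in original_columns]
    return df, categorical_columns

toy = pd.DataFrame({'CODE_GENDER': ['M', 'F', None], 'AMT_CREDIT': [1, 2, 3]})
toy, added = one_hot_encoder(toy)
print(added)  # e.g. ['CODE_GENDER_F', 'CODE_GENDER_M', 'CODE_GENDER_nan']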
18,348,927
train_data['Sentiment'].value_counts()<import_modules>
def group(df_to_agg, prefix, aggregations, aggregate_by= 'SK_ID_CURR'): agg_df = df_to_agg.groupby(aggregate_by ).agg(aggregations) agg_df.columns = pd.Index(['{}{}_{}'.format(prefix, e[0], e[1].upper()) for e in agg_df.columns.tolist() ]) return agg_df.reset_index()
Home Credit Default Risk
18,348,927
from tqdm import tqdm import nltk from nltk.tokenize import word_tokenize from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords import re<string_transform>
def group_and_merge(df_to_agg, df_to_merge, prefix, aggregations, aggregate_by= 'SK_ID_CURR'): agg_df = group(df_to_agg, prefix, aggregations, aggregate_by= aggregate_by) return df_to_merge.merge(agg_df, how='left', on= aggregate_by )
Home Credit Default Risk
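The group/group_and_merge pair above is the aggregation backbone of the Home Credit kernels: .agg with a dict of column-to-statistics yields MultiIndex columns, which are flattened to PREFIX_COLUMN_STAT names before a left merge onto the main frame. A runnable reflow, assuming only pandas (the toy frame is illustrative):

import pandas as pd

def group(df_to_agg, prefix, aggregations, aggregate_by='SK_ID_CURR'):
    agg_df = df_to_agg.groupby(aggregate_by).agg(aggregations)
    # flatten the ('COLUMN', 'stat') MultiIndex into PREFIXCOLUMN_STAT
    agg_df.columns = pd.Index(['{}{}_{}'.format(prefix, e[0], e[1].upper())
                               for e in agg_df.columns.tolist()])
    return agg_df.reset_index()

def group_and_merge(df_to_agg, df_to_merge, prefix, aggregations, aggregate_by='SK_ID_CURR'):
    agg_df = group(df_to_agg, prefix, aggregations, aggregate_by=aggregate_by)
    return df_to_merge.merge(agg_df, how='left', on=aggregate_by)

toy = pd.DataFrame({'SK_ID_CURR': [1, 1, 2], 'AMT_CREDIT': [10.0, 20.0, 30.0]})
print(group(toy, 'BUREAU_', {'AMT_CREDIT': ['mean', 'max']}))
# columns: SK_ID_CURR, BUREAU_AMT_CREDIT_MEAN, BUREAU_AMT_CREDIT_MAX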
18,348,927
def sentence_cleaning(df): sentence = [] for sent in tqdm(df['Phrase']): text = re.sub("[^a-zA-Z]"," ",sent) word = word_tokenize(text.lower()) lemmatizer = WordNetLemmatizer() lemm_word = [lemmatizer.lemmatize(i)for i in word] sentence.append(lemm_word) return(sentence )<prepare_x_and_y>
def get_bureau_balance(path, num_rows= None): bb = pd.read_csv(os.path.join(path, 'bureau_balance.csv')) bb, categorical_cols = one_hot_encoder(bb, nan_as_category= False) bb_processed = bb.groupby('SK_ID_BUREAU')[categorical_cols].mean().reset_index() agg = {'MONTHS_BALANCE': ['min', 'max', 'mean', 'size']} bb_processed = group_and_merge(bb, bb_processed, '', agg, 'SK_ID_BUREAU') del bb; gc.collect() return bb_processed
Home Credit Default Risk
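The sentence_cleaning prompt above, reflowed into runnable form and lightly tidied: the WordNetLemmatizer is hoisted out of the loop, and the NLTK resources it depends on are noted. A sketch assuming nltk and tqdm are installed:

import re
import nltk
from tqdm import tqdm
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer

# one-time downloads needed by word_tokenize and the lemmatizer:
# nltk.download('punkt'); nltk.download('wordnet')

def sentence_cleaning(df):
    lemmatizer = WordNetLemmatizer()  # build once, not per sentence
    sentences = []
    for sent in tqdm(df['Phrase']):
        text = re.sub('[^a-zA-Z]', ' ', sent)   # keep letters only
        words = word_tokenize(text.lower())
        sentences.append([lemmatizer.lemmatize(w) for w in words])
    return sentences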
18,348,927
target_col = train_data.Sentiment.values y_target = to_categorical(target_col) y_target.shape<split>
bureau, categorical_cols = one_hot_encoder(bureau, nan_as_category= False) bureau = bureau.merge(get_bureau_balance(DATA_DIRECTORY), how='left', on='SK_ID_BUREAU') bureau['STATUS_12345'] = 0 for i in range(1,6): bureau['STATUS_12345'] += bureau['STATUS_{}'.format(i)]
Home Credit Default Risk
18,348,927
X_train,X_val,y_train,y_val = train_test_split(train_sent,y_target,test_size = 0.2,stratify = y_target )<count_unique_values>
features = ['AMT_CREDIT_MAX_OVERDUE', 'AMT_CREDIT_SUM_OVERDUE', 'AMT_CREDIT_SUM', 'AMT_CREDIT_SUM_DEBT', 'DEBT_PERCENTAGE', 'DEBT_CREDIT_DIFF', 'STATUS_0', 'STATUS_12345'] agg_length = bureau.groupby('MONTHS_BALANCE_SIZE')[features].mean().reset_index() agg_length.rename({feat: 'LL_' + feat for feat in features}, axis=1, inplace=True) bureau = bureau.merge(agg_length, how='left', on='MONTHS_BALANCE_SIZE') del agg_length; gc.collect()
Home Credit Default Risk
18,348,927
unique_words = set() max_len = 0 for sent in tqdm(X_train): unique_words.update(sent) if(max_len < len(sent)) : max_len = len(sent) sentence = sent<define_variables>
BUREAU_AGG = { 'SK_ID_BUREAU': ['nunique'], 'DAYS_CREDIT': ['min', 'max', 'mean'], 'DAYS_CREDIT_ENDDATE': ['min', 'max'], 'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'], 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_OVERDUE': ['max', 'mean', 'sum'], 'AMT_ANNUITY': ['mean'], 'DEBT_CREDIT_DIFF': ['mean', 'sum'], 'MONTHS_BALANCE_MEAN': ['mean', 'var'], 'MONTHS_BALANCE_SIZE': ['mean', 'sum'], 'STATUS_0': ['mean'], 'STATUS_1': ['mean'], 'STATUS_12345': ['mean'], 'STATUS_C': ['mean'], 'STATUS_X': ['mean'], 'CREDIT_ACTIVE_Active': ['mean'], 'CREDIT_ACTIVE_Closed': ['mean'], 'CREDIT_ACTIVE_Sold': ['mean'], 'CREDIT_TYPE_Consumer credit': ['mean'], 'CREDIT_TYPE_Credit card': ['mean'], 'CREDIT_TYPE_Car loan': ['mean'], 'CREDIT_TYPE_Mortgage': ['mean'], 'CREDIT_TYPE_Microloan': ['mean'], 'LL_AMT_CREDIT_SUM_OVERDUE': ['mean'], 'LL_DEBT_CREDIT_DIFF': ['mean'], 'LL_STATUS_12345': ['mean'], } BUREAU_ACTIVE_AGG = { 'DAYS_CREDIT': ['max', 'mean'], 'DAYS_CREDIT_ENDDATE': ['min', 'max'], 'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'], 'AMT_CREDIT_SUM': ['max', 'sum'], 'AMT_CREDIT_SUM_DEBT': ['mean', 'sum'], 'AMT_CREDIT_SUM_OVERDUE': ['max', 'mean'], 'DAYS_CREDIT_UPDATE': ['min', 'mean'], 'DEBT_PERCENTAGE': ['mean'], 'DEBT_CREDIT_DIFF': ['mean'], 'CREDIT_TO_ANNUITY_RATIO': ['mean'], 'MONTHS_BALANCE_MEAN': ['mean', 'var'], 'MONTHS_BALANCE_SIZE': ['mean', 'sum'], } BUREAU_CLOSED_AGG = { 'DAYS_CREDIT': ['max', 'var'], 'DAYS_CREDIT_ENDDATE': ['max'], 'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'], 'AMT_CREDIT_SUM_OVERDUE': ['mean'], 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_DEBT': ['max', 'sum'], 'DAYS_CREDIT_UPDATE': ['max'], 'ENDDATE_DIF': ['mean'], 'STATUS_12345': ['mean'], } BUREAU_LOAN_TYPE_AGG = { 'DAYS_CREDIT': ['mean', 'max'], 'AMT_CREDIT_MAX_OVERDUE': ['mean', 'max'], 'AMT_CREDIT_SUM': ['mean', 'max'], 'AMT_CREDIT_SUM_DEBT': ['mean', 'max'], 'DEBT_PERCENTAGE': ['mean'], 'DEBT_CREDIT_DIFF': ['mean'], 'DAYS_CREDIT_ENDDATE': ['max'], } BUREAU_TIME_AGG = { 'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'], 'AMT_CREDIT_SUM_OVERDUE': ['mean'], 'AMT_CREDIT_SUM': ['max', 'sum'], 'AMT_CREDIT_SUM_DEBT': ['mean', 'sum'], 'DEBT_PERCENTAGE': ['mean'], 'DEBT_CREDIT_DIFF': ['mean'], 'STATUS_0': ['mean'], 'STATUS_12345': ['mean'], }
Home Credit Default Risk
18,348,927
vocabulary = len(list(unique_words)) oov = '<OOV>' embedding_dim = 300 padding = 'post' trunc = 'post'<string_transform>
agg_bureau = group(bureau, 'BUREAU_', BUREAU_AGG) active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1] agg_bureau = group_and_merge(active,agg_bureau,'BUREAU_ACTIVE_',BUREAU_ACTIVE_AGG) closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1] agg_bureau = group_and_merge(closed,agg_bureau,'BUREAU_CLOSED_',BUREAU_CLOSED_AGG) del active, closed; gc.collect() for credit_type in ['Consumer credit', 'Credit card', 'Mortgage', 'Car loan', 'Microloan']: type_df = bureau[bureau['CREDIT_TYPE_' + credit_type] == 1] prefix = 'BUREAU_' + credit_type.split(' ')[0].upper() + '_' agg_bureau = group_and_merge(type_df, agg_bureau, prefix, BUREAU_LOAN_TYPE_AGG) del type_df; gc.collect() for time_frame in [6, 12]: prefix = "BUREAU_LAST{}M_".format(time_frame) time_frame_df = bureau[bureau['DAYS_CREDIT'] >= -30*time_frame] agg_bureau = group_and_merge(time_frame_df, agg_bureau, prefix, BUREAU_TIME_AGG) del time_frame_df; gc.collect()
Home Credit Default Risk
18,348,927
tokenizer = Tokenizer(num_words = vocabulary,oov_token = oov,char_level = False) tokenizer.fit_on_texts(list(X_train)) X_train = tokenizer.texts_to_sequences(X_train) X_train = pad_sequences(X_train,maxlen = max_len,padding=padding,truncating = trunc) X_val = tokenizer.texts_to_sequences(X_val) X_val = pad_sequences(X_val,maxlen = max_len,padding=padding,truncating = trunc) X_test = tokenizer.texts_to_sequences(test_sent) X_test = pad_sequences(X_test,maxlen = max_len,padding=padding,truncating = trunc )<import_modules>
sort_bureau = bureau.sort_values(by=['DAYS_CREDIT']) gr = sort_bureau.groupby('SK_ID_CURR')['AMT_CREDIT_MAX_OVERDUE'].last().reset_index() gr.rename(columns={'AMT_CREDIT_MAX_OVERDUE': 'BUREAU_LAST_LOAN_MAX_OVERDUE'}, inplace=True) agg_bureau = agg_bureau.merge(gr, on='SK_ID_CURR', how='left') agg_bureau['BUREAU_DEBT_OVER_CREDIT'] = agg_bureau['BUREAU_AMT_CREDIT_SUM_DEBT_SUM']/agg_bureau['BUREAU_AMT_CREDIT_SUM_SUM'] agg_bureau['BUREAU_ACTIVE_DEBT_OVER_CREDIT'] = agg_bureau['BUREAU_ACTIVE_AMT_CREDIT_SUM_DEBT_SUM']/agg_bureau['BUREAU_ACTIVE_AMT_CREDIT_SUM_SUM']
Home Credit Default Risk
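The Tokenizer/pad_sequences cells in these rows all follow one pattern: fit the vocabulary on the training sentences only, then push train/validation/test through texts_to_sequences and pad everything to one fixed length. A minimal self-contained sketch (toy tokenized sentences, illustrative num_words and maxlen):

from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

texts = [['a', 'masterly', 'film'], ['dull', 'film']]      # pre-tokenized, as in the kernels
tok = Tokenizer(num_words=1000, oov_token='<OOV>', char_level=False)
tok.fit_on_texts(texts)                 # vocabulary comes from the training split only
seqs = tok.texts_to_sequences(texts)    # words -> integer ids, unseen words -> <OOV>
padded = pad_sequences(seqs, maxlen=4, padding='post', truncating='post')
print(padded.shape)                     # (2, 4)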
18,348,927
from keras.models import Sequential from keras.layers import Dense,Bidirectional,Activation,Dropout,LSTM,Embedding from keras.layers.embeddings import Embedding<choose_model_class>
df = pd.merge(df, agg_bureau, on='SK_ID_CURR', how='left') del agg_bureau, bureau gc.collect()
Home Credit Default Risk
18,348,927
model = Sequential() model.add(Embedding(vocabulary,embedding_dim,input_length = max_len)) model.add(Bidirectional(LSTM(128, dropout = 0.8, recurrent_dropout=0.8, return_sequences=True))) model.add(Bidirectional(LSTM(128,dropout = 0.5,recurrent_dropout=0.5,return_sequences=False))) model.add(Dense(64,activation='relu')) model.add(Dropout(0.4)) model.add(Dense(5,activation='softmax')) model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy']) model.summary()<choose_model_class>
prev = pd.read_csv(os.path.join(DATA_DIRECTORY, 'previous_application.csv')) pay = pd.read_csv(os.path.join(DATA_DIRECTORY, 'installments_payments.csv'))
Home Credit Default Risk
18,348,927
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'] )<train_model>
PREVIOUS_AGG = { 'SK_ID_PREV': ['nunique'], 'AMT_ANNUITY': ['min', 'max', 'mean'], 'AMT_DOWN_PAYMENT': ['max', 'mean'], 'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'], 'RATE_DOWN_PAYMENT': ['max', 'mean'], 'DAYS_DECISION': ['min', 'max', 'mean'], 'CNT_PAYMENT': ['max', 'mean'], 'DAYS_TERMINATION': ['max'], 'CREDIT_TO_ANNUITY_RATIO': ['mean', 'max'], 'APPLICATION_CREDIT_DIFF': ['min', 'max', 'mean'], 'APPLICATION_CREDIT_RATIO': ['min', 'max', 'mean', 'var'], 'DOWN_PAYMENT_TO_CREDIT': ['mean'], } PREVIOUS_ACTIVE_AGG = { 'SK_ID_PREV': ['nunique'], 'SIMPLE_INTERESTS': ['mean'], 'AMT_ANNUITY': ['max', 'sum'], 'AMT_APPLICATION': ['max', 'mean'], 'AMT_CREDIT': ['sum'], 'AMT_DOWN_PAYMENT': ['max', 'mean'], 'DAYS_DECISION': ['min', 'mean'], 'CNT_PAYMENT': ['mean', 'sum'], 'DAYS_LAST_DUE_1ST_VERSION': ['min', 'max', 'mean'], 'AMT_PAYMENT': ['sum'], 'INSTALMENT_PAYMENT_DIFF': ['mean', 'max'], 'REMAINING_DEBT': ['max', 'mean', 'sum'], 'REPAYMENT_RATIO': ['mean'], } PREVIOUS_LATE_PAYMENTS_AGG = { 'DAYS_DECISION': ['min', 'max', 'mean'], 'DAYS_LAST_DUE_1ST_VERSION': ['min', 'max', 'mean'], 'APPLICATION_CREDIT_DIFF': ['min'], 'NAME_CONTRACT_TYPE_Consumer loans': ['mean'], 'NAME_CONTRACT_TYPE_Cash loans': ['mean'], 'NAME_CONTRACT_TYPE_Revolving loans': ['mean'], } PREVIOUS_LOAN_TYPE_AGG = { 'AMT_CREDIT': ['sum'], 'AMT_ANNUITY': ['mean', 'max'], 'SIMPLE_INTERESTS': ['min', 'mean', 'max', 'var'], 'APPLICATION_CREDIT_DIFF': ['min', 'var'], 'APPLICATION_CREDIT_RATIO': ['min', 'max', 'mean'], 'DAYS_DECISION': ['max'], 'DAYS_LAST_DUE_1ST_VERSION': ['max', 'mean'], 'CNT_PAYMENT': ['mean'], } PREVIOUS_TIME_AGG = { 'AMT_CREDIT': ['sum'], 'AMT_ANNUITY': ['mean', 'max'], 'SIMPLE_INTERESTS': ['mean', 'max'], 'DAYS_DECISION': ['min', 'mean'], 'DAYS_LAST_DUE_1ST_VERSION': ['min', 'max', 'mean'], 'APPLICATION_CREDIT_DIFF': ['min'], 'APPLICATION_CREDIT_RATIO': ['min', 'max', 'mean'], 'NAME_CONTRACT_TYPE_Consumer loans': ['mean'], 'NAME_CONTRACT_TYPE_Cash loans': ['mean'], 'NAME_CONTRACT_TYPE_Revolving loans': ['mean'], } PREVIOUS_APPROVED_AGG = { 'SK_ID_PREV': ['nunique'], 'AMT_ANNUITY': ['min', 'max', 'mean'], 'AMT_CREDIT': ['min', 'max', 'mean'], 'AMT_DOWN_PAYMENT': ['max'], 'AMT_GOODS_PRICE': ['max'], 'HOUR_APPR_PROCESS_START': ['min', 'max'], 'DAYS_DECISION': ['min', 'mean'], 'CNT_PAYMENT': ['max', 'mean'], 'DAYS_TERMINATION': ['mean'], 'CREDIT_TO_ANNUITY_RATIO': ['mean', 'max'], 'APPLICATION_CREDIT_DIFF': ['max'], 'APPLICATION_CREDIT_RATIO': ['min', 'max', 'mean'], 'DAYS_FIRST_DRAWING': ['max', 'mean'], 'DAYS_FIRST_DUE': ['min', 'mean'], 'DAYS_LAST_DUE_1ST_VERSION': ['min', 'max', 'mean'], 'DAYS_LAST_DUE': ['max', 'mean'], 'DAYS_LAST_DUE_DIFF': ['min', 'max', 'mean'], 'SIMPLE_INTERESTS': ['min', 'max', 'mean'], } PREVIOUS_REFUSED_AGG = { 'AMT_APPLICATION': ['max', 'mean'], 'AMT_CREDIT': ['min', 'max'], 'DAYS_DECISION': ['min', 'max', 'mean'], 'CNT_PAYMENT': ['max', 'mean'], 'APPLICATION_CREDIT_DIFF': ['min', 'max', 'mean', 'var'], 'APPLICATION_CREDIT_RATIO': ['min', 'mean'], 'NAME_CONTRACT_TYPE_Consumer loans': ['mean'], 'NAME_CONTRACT_TYPE_Cash loans': ['mean'], 'NAME_CONTRACT_TYPE_Revolving loans': ['mean'], }
Home Credit Default Risk
18,348,927
hist_model = model.fit(X_train,y_train, validation_data =(X_val, y_val), epochs = 4, batch_size = 256, verbose = 1 )<define_variables>
ohe_columns = [ 'NAME_CONTRACT_STATUS', 'NAME_CONTRACT_TYPE', 'CHANNEL_TYPE', 'NAME_TYPE_SUITE', 'NAME_YIELD_GROUP', 'PRODUCT_COMBINATION', 'NAME_PRODUCT_TYPE', 'NAME_CLIENT_TYPE'] prev, categorical_cols = one_hot_encoder(prev, ohe_columns, nan_as_category= False )
Home Credit Default Risk
18,348,927
test_id = test_data['PhraseId'] test_id<save_to_csv>
prev['APPLICATION_CREDIT_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_CREDIT'] prev['APPLICATION_CREDIT_RATIO'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT'] prev['CREDIT_TO_ANNUITY_RATIO'] = prev['AMT_CREDIT']/prev['AMT_ANNUITY'] prev['DOWN_PAYMENT_TO_CREDIT'] = prev['AMT_DOWN_PAYMENT'] / prev['AMT_CREDIT'] total_payment = prev['AMT_ANNUITY'] * prev['CNT_PAYMENT'] prev['SIMPLE_INTERESTS'] =(total_payment/prev['AMT_CREDIT'] - 1)/prev['CNT_PAYMENT']
Home Credit Default Risk
18,348,927
y_pred = np.argmax(model.predict(X_test), axis = -1) submission_df = pd.DataFrame({'PhraseId': test_id, 'Sentiment': y_pred}) submission_df.to_csv('submission.csv', index=False) submission_df.head()<import_modules>
approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1] active_df = approved[approved['DAYS_LAST_DUE'] == 365243] active_pay = pay[pay['SK_ID_PREV'].isin(active_df['SK_ID_PREV'])] active_pay_agg = active_pay.groupby('SK_ID_PREV')[['AMT_INSTALMENT', 'AMT_PAYMENT']].sum() active_pay_agg.reset_index(inplace= True) active_pay_agg['INSTALMENT_PAYMENT_DIFF'] = active_pay_agg['AMT_INSTALMENT'] - active_pay_agg['AMT_PAYMENT'] active_df = active_df.merge(active_pay_agg, on= 'SK_ID_PREV', how= 'left') active_df['REMAINING_DEBT'] = active_df['AMT_CREDIT'] - active_df['AMT_PAYMENT'] active_df['REPAYMENT_RATIO'] = active_df['AMT_PAYMENT'] / active_df['AMT_CREDIT'] active_agg_df = group(active_df, 'PREV_ACTIVE_', PREVIOUS_ACTIVE_AGG) active_agg_df['TOTAL_REPAYMENT_RATIO'] = active_agg_df['PREV_ACTIVE_AMT_PAYMENT_SUM']/\ active_agg_df['PREV_ACTIVE_AMT_CREDIT_SUM'] del active_pay, active_pay_agg, active_df; gc.collect()
Home Credit Default Risk
18,348,927
import matplotlib.pyplot as plt import tensorflow as tf from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences<load_from_csv>
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True) prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True) prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True) prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True) prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True )
Home Credit Default Risk
18,348,927
train_data = pd.read_csv('../input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t') train_data.head()<load_from_csv>
prev['DAYS_LAST_DUE_DIFF'] = prev['DAYS_LAST_DUE_1ST_VERSION'] - prev['DAYS_LAST_DUE'] approved['DAYS_LAST_DUE_DIFF'] = approved['DAYS_LAST_DUE_1ST_VERSION'] - approved['DAYS_LAST_DUE']
Home Credit Default Risk
18,348,927
test_data = pd.read_csv('../input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t') test_data.head()<count_unique_values>
categorical_agg = {key: ['mean'] for key in categorical_cols}
Home Credit Default Risk
18,348,927
print(train_data['Sentiment'].unique()) train_data['Sentiment'].nunique()<count_values>
agg_prev = group(prev, 'PREV_', {**PREVIOUS_AGG, **categorical_agg}) agg_prev = agg_prev.merge(active_agg_df, how='left', on='SK_ID_CURR') del active_agg_df; gc.collect()
Home Credit Default Risk
18,348,927
train_data['Sentiment'].value_counts()<import_modules>
agg_prev = group_and_merge(approved, agg_prev, 'APPROVED_', PREVIOUS_APPROVED_AGG) refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1] agg_prev = group_and_merge(refused, agg_prev, 'REFUSED_', PREVIOUS_REFUSED_AGG) del approved, refused; gc.collect()
Home Credit Default Risk
18,348,927
from tqdm import tqdm import nltk from nltk.tokenize import word_tokenize from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords import re<string_transform>
for loan_type in ['Consumer loans', 'Cash loans']: type_df = prev[prev['NAME_CONTRACT_TYPE_{}'.format(loan_type)] == 1] prefix = 'PREV_' + loan_type.split(" ")[0] + '_' agg_prev = group_and_merge(type_df, agg_prev, prefix, PREVIOUS_LOAN_TYPE_AGG) del type_df; gc.collect()
Home Credit Default Risk
18,348,927
def clean_sentences(df): reviews = [] for sent in tqdm(df['Phrase']): text = re.sub("[^a-zA-Z]"," ",sent) words = word_tokenize(text.lower()) new_words = [ ele for ele in words if ele.lower() not in stopwords.words('english')] lem = WordNetLemmatizer() lem_words = [lem.lemmatize(i)for i in new_words] reviews.append(lem_words) return(reviews )<string_transform>
pay['LATE_PAYMENT'] = pay['DAYS_ENTRY_PAYMENT'] - pay['DAYS_INSTALMENT'] pay['LATE_PAYMENT'] = pay['LATE_PAYMENT'].apply(lambda x: 1 if x > 0 else 0) dpd_id = pay[pay['LATE_PAYMENT'] > 0]['SK_ID_PREV'].unique()
Home Credit Default Risk
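The LATE_PAYMENT flag above takes two passes (a subtraction, then an apply). A single vectorized comparison produces the same column; a sketch with an illustrative toy frame:

import pandas as pd

pay = pd.DataFrame({'SK_ID_PREV': [1, 1, 2],
                    'DAYS_INSTALMENT': [-30, -60, -90],
                    'DAYS_ENTRY_PAYMENT': [-25, -60, -95]})
# paid after the due date => late
pay['LATE_PAYMENT'] = (pay['DAYS_ENTRY_PAYMENT'] > pay['DAYS_INSTALMENT']).astype(int)
dpd_id = pay.loc[pay['LATE_PAYMENT'] == 1, 'SK_ID_PREV'].unique()
print(dpd_id)  # [1]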
18,348,927
%%time train_sentences = clean_sentences(train_data) test_sentences = clean_sentences(test_data) print(len(train_sentences)) print(len(test_sentences))<string_transform>
agg_prev = group_and_merge(prev[prev['SK_ID_PREV'].isin(dpd_id)], agg_prev, 'PREV_LATE_', PREVIOUS_LATE_PAYMENTS_AGG) del dpd_id; gc.collect()
Home Credit Default Risk
18,348,927
print(train_data['Phrase'][0]) print(" ".join(train_sentences[0]))<prepare_x_and_y>
df = pd.merge(df, agg_prev, on='SK_ID_CURR', how='left' )
Home Credit Default Risk
18,348,927
y_target = to_categorical(train_data['Sentiment'].values )<split>
train = df[df['TARGET'].notnull() ] test = df[df['TARGET'].isnull() ] del df gc.collect()
Home Credit Default Risk
18,348,927
X_train,X_val,y_train,y_val = train_test_split(train_sentences,y_target,test_size = 0.2,stratify = y_target )<count_unique_values>
labels = train['TARGET'] train = train.drop(columns=['TARGET']) test = test.drop(columns=['TARGET'] )
Home Credit Default Risk
18,348,927
unique_words = set() len_max = -1 for sent in tqdm(X_train): unique_words.update(sent) if(len_max < len(sent)) : len_max = len(sent) print('Words in vocab : ' , len(list(unique_words))) print('Max_length : ' , len_max )<define_variables>
feature = list(train.columns) train.replace([np.inf, -np.inf], np.nan, inplace=True) test.replace([np.inf, -np.inf], np.nan, inplace=True) test_df = test.copy() train_df = train.copy() train_df['TARGET'] = labels
Home Credit Default Risk
18,348,927
vocab_size = len(list(unique_words)) embedding_dim = 300 max_length = len_max trunc_type = 'post' padding_type = 'post' oov_tok = '<OOV>'<string_transform>
imputer = SimpleImputer(strategy = 'median') imputer.fit(train) train = imputer.transform(train) test = imputer.transform(test )
Home Credit Default Risk
18,348,927
%%time tokenizer = Tokenizer(num_words = vocab_size, oov_token = oov_tok, char_level = False) tokenizer.fit_on_texts(list(X_train)) X_train = tokenizer.texts_to_sequences(X_train) X_train = pad_sequences(X_train, maxlen = max_length, padding = padding_type, truncating = trunc_type) X_val = tokenizer.texts_to_sequences(X_val) X_val = pad_sequences(X_val, maxlen = max_length, padding = padding_type, truncating = trunc_type) X_test = tokenizer.texts_to_sequences(test_sentences) X_test = pad_sequences(X_test, maxlen = max_length, padding = padding_type, truncating = trunc_type)<import_modules>
scaler = MinMaxScaler(feature_range=(0, 1)) scaler.fit(train) train = scaler.transform(train) test = scaler.transform(test)
Home Credit Default Risk
18,348,927
from keras.models import Sequential from keras.layers import Dense,Bidirectional,LSTM,Activation,Conv1D,MaxPool1D,Dropout from keras.layers.embeddings import Embedding<choose_model_class>
log_reg = LogisticRegression(C = 0.0001) log_reg.fit(train, labels )
Home Credit Default Risk
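The three rows above chain a median SimpleImputer, a MinMaxScaler, and a LogisticRegression with C=0.0001. Wrapping them in one sklearn Pipeline keeps the fit-on-train / transform-on-test ordering correct automatically; a sketch with illustrative toy arrays:

import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

train = np.array([[1.0, np.nan], [2.0, 3.0], [4.0, 1.0], [0.0, 2.0]])  # toy stand-ins
labels = np.array([0, 1, 0, 1])
test = np.array([[3.0, np.nan]])

clf = make_pipeline(SimpleImputer(strategy='median'),
                    MinMaxScaler(feature_range=(0, 1)),
                    LogisticRegression(C=0.0001))
clf.fit(train, labels)
pred = clf.predict_proba(test)[:, 1]  # P(TARGET=1), as written to the submission above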
18,348,927
model = Sequential() model.add(Embedding(vocab_size,embedding_dim,input_length = max_length)) model.add(Bidirectional(LSTM(128,dropout = 0.2, recurrent_dropout = 0.2, return_sequences=True))) model.add(Bidirectional(LSTM(64, dropout = 0.2, recurrent_dropout = 0.2, return_sequences=False))) model.add(Dense(128,activation = 'relu')) model.add(Dense(y_target.shape[1],activation = 'softmax')) model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy']) model.summary()<choose_model_class>
log_reg_pred = log_reg.predict_proba(test)[:, 1]
Home Credit Default Risk
18,348,927
early_stopping = EarlyStopping(min_delta = 0.001, mode = 'max', monitor = 'val_acc', patience = 2) callback = [early_stopping]<train_model>
submit = test_df[['SK_ID_CURR']].copy() submit['TARGET'] = log_reg_pred
Home Credit Default Risk
18,348,927
%%time num_epochs = 4 history = model.fit(X_train,y_train, validation_data =(X_val, y_val), epochs = num_epochs, batch_size = 256, verbose = 1, callbacks = callback )<define_variables>
submit.to_csv('log_reg.csv', index = False )
Home Credit Default Risk
18,348,927
test_id = test_data['PhraseId']<predict_on_test>
target = train_df.pop('TARGET') len_train = len(train_df) merged_df = pd.concat([train_df, test_df]) meta_df = merged_df.pop('SK_ID_CURR') del test_df, train_df gc.collect()
Home Credit Default Risk
18,348,927
y_pred = np.argmax(model.predict(X_test), axis = -1 )<save_to_csv>
categorical_feats = merged_df.columns[merged_df.dtypes == 'object'] print('Using %d prediction variables'%(merged_df.shape[1])) print('Encoding %d non-numeric columns...'%(merged_df.columns[merged_df.dtypes == 'object'].shape[0])) for feat in categorical_feats: merged_df[feat].fillna('MISSING', inplace=True) encoder = LabelBinarizer() new_columns = encoder.fit_transform(merged_df[feat]) i=0 for u in merged_df[feat].unique(): if i<new_columns.shape[1]: merged_df[feat+'_'+u]=new_columns[:,i] i+=1 merged_df.drop(feat, axis=1, inplace=True) print('Now using %d prediction variables'%(merged_df.shape[1]))
Home Credit Default Risk
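The LabelBinarizer loop above expands each object column into indicator columns by hand after filling NaNs with 'MISSING'. pd.get_dummies gives essentially the same expansion in one call (column naming can differ slightly from the manual loop); a sketch with an illustrative toy frame:

import pandas as pd

toy = pd.DataFrame({'NAME_CONTRACT_TYPE': ['Cash', None, 'Revolving'],
                    'AMT_CREDIT': [1.0, 2.0, 3.0]})
cat_cols = toy.columns[toy.dtypes == 'object']
toy[cat_cols] = toy[cat_cols].fillna('MISSING')       # same MISSING convention
toy = pd.get_dummies(toy, columns=list(cat_cols))     # one indicator per level
print(toy.columns.tolist())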
18,348,927
submission_df = pd.DataFrame({'PhraseId': test_id, 'Sentiment': y_pred}) submission_df.to_csv('submission_.csv', index=False) submission_df.head()<load_from_csv>
null_counts = merged_df.isnull().sum() null_counts = null_counts[null_counts > 0] null_ratios = null_counts / len(merged_df)
Home Credit Default Risk
18,348,927
train_data = pd.read_csv('../input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t') train_data.head()<load_from_csv>
null_thresh = 0.8 null_cols = null_ratios[null_ratios > null_thresh].index merged_df.drop(null_cols, axis=1, inplace=True) if null_cols.shape[0] > 0: print('Columns dropped for being over %.2f null:'%(null_thresh)) for col in null_cols: print(col)
Home Credit Default Risk
18,348,927
test_data = pd.read_csv('../input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t') test_data.head()<import_modules>
merged_df.fillna(0, inplace=True)
Home Credit Default Risk
18,348,927
import matplotlib.pyplot as plt import tensorflow as tf<import_modules>
for feat in merged_df.columns: if(merged_df[feat].max() > 100)|(merged_df[feat].min() < -100): merged_df[feat]=merged_df[feat].astype(np.float64) scaler = StandardScaler() continuous_feats = merged_df.columns[merged_df.dtypes == 'float64'] print('Scaling %d features...'%(continuous_feats.shape[0])) s1 = merged_df.shape[0],1 for feat in continuous_feats: merged_df[feat] = scaler.fit_transform(merged_df[feat].values.reshape(s1))
Home Credit Default Risk
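The loop above refits and applies the StandardScaler one column at a time, reshaping each to (n, 1). The scaler accepts a whole 2-D block in a single call, which is both shorter and faster; a sketch with an illustrative toy frame:

import pandas as pd
from sklearn.preprocessing import StandardScaler

toy = pd.DataFrame({'a': [1.0, 200.0, 3.0], 'b': [4.0, 5.0, -600.0]})
continuous = toy.columns[toy.dtypes == 'float64']
# one fit_transform over all continuous columns instead of a per-column loop
toy[continuous] = StandardScaler().fit_transform(toy[continuous])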
18,348,927
from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences<set_options>
train_df = merged_df[:len_train] test_df = merged_df[len_train:] del merged_df gc.collect()
Home Credit Default Risk
18,348,927
print("TF version: ", tf.__version__) if tf.__version__ < "2.0.0": tf.enable_eager_execution() print("Eager execution enabled.") else: print("Eager execution enabled by default.") if tf.test.gpu_device_name() : print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) else: print("Please install GPU version of TF" )<count_unique_values>
L2c = 4e-4 lr0 = 0.02 lr_decay = 0.90 iterations = 11 ROWS = train_df.shape[0] VARS = train_df.shape[1] NUMB = 10000 NN = int(ROWS/NUMB )
Home Credit Default Risk
18,348,927
print(train_data['Sentiment'].unique()) train_data['Sentiment'].nunique()<count_values>
tf.disable_v2_behavior() y_ = tf.placeholder(tf.float32, [None, 1]) x = tf.placeholder(tf.float32, [None, VARS]) W = tf.Variable(tf.truncated_normal([VARS,1],mean=0.0,stddev=0.001),dtype=np.float32) NUML1 = 10 W1 = tf.Variable(tf.truncated_normal([VARS,NUML1],mean=0.0,stddev=0.0001),dtype=np.float32) W1f = tf.Variable(tf.truncated_normal([NUML1,1],mean=0.0,stddev=0.0001),dtype=np.float32) logit1 = tf.matmul(x, W)+ tf.matmul(tf.nn.relu(tf.matmul(x, W1)) , W1f) y = tf.nn.sigmoid(logit1 )
Home Credit Default Risk
18,348,927
train_data['Sentiment'].value_counts()<import_modules>
loss0 = tf.reduce_mean(( y_-y)*(y_-y)) loss1 = L2c *(tf.nn.l2_loss(W)+ tf.nn.l2_loss(W1)+ tf.nn.l2_loss(W1f)) loss = loss0 + loss1 global_step = tf.Variable(0, trainable=False) learning_rate = tf.train.exponential_decay(lr0, global_step, NN, lr_decay) train_step = tf.train.AdamOptimizer(learning_rate=learning_rate ).minimize(loss,global_step=global_step) sess = tf.InteractiveSession() tf.global_variables_initializer().run()
Home Credit Default Risk
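The placeholder graph in the rows above uses TF1-style APIs (tf.placeholder, tf.InteractiveSession, tf.train.AdamOptimizer); under TensorFlow 2 these names only exist in tensorflow.compat.v1. A minimal self-contained sketch of the same one-hidden-layer sigmoid model under that assumption (layer sizes illustrative):

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores placeholders and sessions under TF2

VARS, NUML1 = 8, 10
x = tf.placeholder(tf.float32, [None, VARS])
W = tf.Variable(tf.truncated_normal([VARS, 1], stddev=0.001))
W1 = tf.Variable(tf.truncated_normal([VARS, NUML1], stddev=0.0001))
W1f = tf.Variable(tf.truncated_normal([NUML1, 1], stddev=0.0001))
# linear term plus a small ReLU hidden layer, squashed to a probability
logit = tf.matmul(x, W) + tf.matmul(tf.nn.relu(tf.matmul(x, W1)), W1f)
y = tf.nn.sigmoid(logit)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    probs = sess.run(y, feed_dict={x: np.zeros((2, VARS), np.float32)})
    print(probs.shape)  # (2, 1)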
18,348,927
from tqdm import tqdm import nltk from nltk.tokenize import word_tokenize from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords import re<string_transform>
y0=target.values.astype(np.float32) x0=train_df.values.astype(np.float32) del train_df gc.collect() y0_1=np.where(y0[0:int(NN*0.8)*NUMB] == 1)[0] y0_0=np.where(y0[0:int(NN*0.8)*NUMB] == 0)[0] for i in range(iterations): for j in range(int(NN*0.8)) : pos_ratio = 0.5 pos_idx = np.random.choice(y0_1, size=int(np.round(NUMB*pos_ratio))) neg_idx = np.random.choice(y0_0, size=int(np.round(NUMB*(1-pos_ratio)))) idx = np.concatenate([pos_idx, neg_idx]) fd = {y_: y0[idx].reshape(NUMB,1),x: x0[idx,:]} _= sess.run([train_step], feed_dict=fd) if i%10 == 0: fd = {y_: y0.reshape(y0.shape[0],1),x: x0} y1 = sess.run(y, feed_dict=fd) lim = int(NN*0.8)* NUMB auc1 = roc_auc_score(y0[0:lim],y1[0:lim,0]) auc2 = roc_auc_score(y0[lim:y0.shape[0]],y1[lim:y0.shape[0],0]) print('iteration %d, auc train/validatet %.5f/%.5f'%(i,auc1,auc2))
Home Credit Default Risk
18,348,927
def sentence_cleaning(df): sentence = [] for sent in tqdm(df['Phrase']): text = re.sub("[^a-zA-Z]"," ",sent) word = word_tokenize(text.lower()) lemmatizer = WordNetLemmatizer() lemm_word = [lemmatizer.lemmatize(i)for i in word] sentence.append(lemm_word) return(sentence )<prepare_x_and_y>
x0 = test_df.values.astype(np.float32) fd = {y_: np.zeros([x0.shape[0],1]),x: x0} y_pred = sess.run(y, feed_dict=fd) out_df = pd.DataFrame({'SK_ID_CURR': meta_df[len_train:], 'TARGET': y_pred[:,0]}) out_df.to_csv('nn_submission.csv', index=False)
Home Credit Default Risk
18,348,927
target_col = train_data.Sentiment.values y_target = to_categorical(target_col) y_target.shape<split>
nn_result=pd.read_csv('../input/pial-data-n/nn_p.csv') log_result=pd.read_csv('../input/pial-data/lgb_p.csv') nn_result.rename(columns={'TARGET':'nn_TARGET'},inplace=True) log_result.rename(columns={'TARGET':'log_TARGET'},inplace=True) sub=pd.merge(nn_result,log_result,on='SK_ID_CURR') sub['TARGET']=0*sub['nn_TARGET']+1*sub['log_TARGET'] sub=sub.drop(columns=['log_TARGET','nn_TARGET']) sub.to_csv('subm_nn_log_p.csv', index=False, float_format='%.8f')
Home Credit Default Risk
16,720,939
X_train,X_val,y_train,y_val = train_test_split(train_sent,y_target,test_size = 0.2,stratify = y_target )<count_unique_values>
warnings.filterwarnings('ignore' )
Home Credit Default Risk
16,720,939
unique_words = set() max_len = 0 for sent in tqdm(X_train): unique_words.update(sent) if(max_len < len(sent)) : max_len = len(sent) sentence = sent<define_variables>
def one_hot_encoder(df, nan_as_category=True): original_columns = list(df.columns) categorical_columns = [col for col in df.columns if df[col].dtype == 'object'] df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category) new_columns = [c for c in df.columns if c not in original_columns] return df, new_columns def group(df_to_agg, prefix, aggregations, aggregate_by= 'SK_ID_CURR'): agg_df = df_to_agg.groupby(aggregate_by ).agg(aggregations) agg_df.columns = pd.Index(['{}{}_{}'.format(prefix, e[0], e[1].upper()) for e in agg_df.columns.tolist() ]) return agg_df.reset_index() def group_and_merge(df_to_agg, df_to_merge, prefix, aggregations, aggregate_by= 'SK_ID_CURR'): agg_df = group(df_to_agg, prefix, aggregations, aggregate_by= aggregate_by) return df_to_merge.merge(agg_df, how='left', on= aggregate_by) def do_sum(dataframe, group_cols, counted, agg_name): gp = dataframe[group_cols + [counted]].groupby(group_cols)[counted].sum().reset_index().rename(columns={counted: agg_name}) dataframe = dataframe.merge(gp, on=group_cols, how='left') return dataframe def reduce_mem_usage(dataframe): m_start = dataframe.memory_usage().sum() / 1024 ** 2 for col in dataframe.columns: col_type = dataframe[col].dtype if col_type != object: c_min = dataframe[col].min() c_max = dataframe[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8 ).min and c_max < np.iinfo(np.int8 ).max: dataframe[col] = dataframe[col].astype(np.int8) elif c_min > np.iinfo(np.int16 ).min and c_max < np.iinfo(np.int16 ).max: dataframe[col] = dataframe[col].astype(np.int16) elif c_min > np.iinfo(np.int32 ).min and c_max < np.iinfo(np.int32 ).max: dataframe[col] = dataframe[col].astype(np.int32) elif c_min > np.iinfo(np.int64 ).min and c_max < np.iinfo(np.int64 ).max: dataframe[col] = dataframe[col].astype(np.int64) elif str(col_type)[:5] == 'float': if c_min > np.finfo(np.float16 ).min and c_max < np.finfo(np.float16 ).max: dataframe[col] = dataframe[col].astype(np.float16) elif c_min > np.finfo(np.float32 ).min and c_max < np.finfo(np.float32 ).max: dataframe[col] = dataframe[col].astype(np.float32) else: dataframe[col] = dataframe[col].astype(np.float64) m_end = dataframe.memory_usage().sum() / 1024 ** 2 return dataframe nan_as_category = True def risk_groupanizer(dataframe, column_names, target_val=1, upper_limit_ratio=8.2, lower_limit_ratio=8.2): all_cols = dataframe.columns for col in column_names: temp_df = dataframe.groupby([col] + ['TARGET'])[['SK_ID_CURR']].count().reset_index() temp_df['ratio%'] = round(temp_df['SK_ID_CURR']*100/temp_df.groupby([col])['SK_ID_CURR'].transform('sum'), 1) col_groups_high_risk = temp_df[(temp_df['TARGET'] == target_val)& (temp_df['ratio%'] >= upper_limit_ratio)][col].tolist() col_groups_low_risk = temp_df[(temp_df['TARGET'] == target_val)& (lower_limit_ratio >= temp_df['ratio%'])][col].tolist() if upper_limit_ratio != lower_limit_ratio: col_groups_medium_risk = temp_df[(temp_df['TARGET'] == target_val)& (upper_limit_ratio > temp_df['ratio%'])&(temp_df['ratio%'] > lower_limit_ratio)][col].tolist() for risk, col_groups in zip(['_high_risk', '_medium_risk', '_low_risk'], [col_groups_high_risk, col_groups_medium_risk, col_groups_low_risk]): dataframe[col + risk] = [1 if val in col_groups else 0 for val in dataframe[col].values] else: for risk, col_groups in zip(['_high_risk', '_low_risk'], [col_groups_high_risk, col_groups_low_risk]): dataframe[col + risk] = [1 if val in col_groups else 0 for val in dataframe[col].values] if dataframe[col].dtype == 'O' or 
dataframe[col].dtype == 'object': dataframe.drop(col, axis=1, inplace=True) return dataframe, list(set(dataframe.columns ).difference(set(all_cols))) def ligthgbm_feature_selection(dataframe, index_cols, auc_limit=0.7): dataframe = dataframe.rename(columns=lambda x: re.sub('[^A-Za-z0-9_]+', '_', x)) clf = LGBMClassifier(random_state=0) train_df = dataframe[dataframe['TARGET'].notnull() ] train_df_X = train_df.drop('TARGET', axis=1) train_df_y = train_df['TARGET'] train_columns = [col for col in train_df_X.columns if col not in index_cols] max_auc_score = 1 best_cols = [] while max_auc_score > auc_limit: train_columns = [col for col in train_columns if col not in best_cols] clf.fit(train_df_X[train_columns], train_df_y) feats_imp = pd.Series(clf.feature_importances_, index=train_columns) max_auc_score = roc_auc_score(train_df_y, clf.predict_proba(train_df_X[train_columns])[:, 1]) best_cols = feats_imp[feats_imp > 0].index.tolist() dataframe.drop(train_columns, axis=1, inplace=True) return dataframe
Home Credit Default Risk
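reduce_mem_usage above downcasts each numeric column to the smallest subtype whose range covers the column's min and max. pandas' to_numeric can do the bounds checking itself; a condensed sketch of the same idea (it stops at float32, whereas the helper above may choose float16):

import pandas as pd

def downcast(df):
    # pandas picks the smallest safe integer/float subtype per column
    for col in df.select_dtypes(include=['integer']).columns:
        df[col] = pd.to_numeric(df[col], downcast='integer')
    for col in df.select_dtypes(include=['float']).columns:
        df[col] = pd.to_numeric(df[col], downcast='float')
    return df

toy = pd.DataFrame({'i': [1, 2, 3], 'f': [0.5, 1.5, 2.5]})
print(downcast(toy).dtypes)  # i: int8, f: float32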
16,720,939
vocabulary = len(list(unique_words)) oov = '<OOV>' embedding_dim = 300 padding = 'post' trunc = 'post'<string_transform>
def application() : df = pd.read_csv(r'.. /input/home-credit-default-risk/application_train.csv') test_df = pd.read_csv(r'.. /input/home-credit-default-risk/application_test.csv') df = df.append(test_df ).reset_index() df = df[df['CODE_GENDER'] != 'XNA'] df = df[df['AMT_INCOME_TOTAL'] < 20000000] df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True) df['DAYS_LAST_PHONE_CHANGE'].replace(0, np.nan, inplace=True) for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']: df[bin_feature], uniques = pd.factorize(df[bin_feature]) df, cat_cols = one_hot_encoder(df, nan_as_category) docs = [f for f in df.columns if 'FLAG_DOC' in f] df['DOCUMENT_COUNT'] = df[docs].sum(axis=1) df['NEW_DOC_KURT'] = df[docs].kurtosis(axis=1) def get_age_label(days_birth): age_years = -days_birth / 365 if age_years < 27: return 1 elif age_years < 40: return 2 elif age_years < 50: return 3 elif age_years < 65: return 4 elif age_years < 99: return 5 else: return 0 df['AGE_RANGE'] = df['DAYS_BIRTH'].apply(lambda x: get_age_label(x)) df['EXT_SOURCES_PROD'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3'] df['EXT_SOURCES_WEIGHTED'] = df.EXT_SOURCE_1 * 2 + df.EXT_SOURCE_2 * 1 + df.EXT_SOURCE_3 * 3 np.warnings.filterwarnings('ignore', r'All-NaN(slice|axis)encountered') for function_name in ['min', 'max', 'mean', 'nanmedian', 'var']: feature_name = 'EXT_SOURCES_{}'.format(function_name.upper()) df[feature_name] = eval('np.{}'.format(function_name))( df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']], axis=1) df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH'] df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT'] df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS'] df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL'] df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT'] df['CREDIT_TO_GOODS_RATIO'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE'] df['INCOME_TO_EMPLOYED_RATIO'] = df['AMT_INCOME_TOTAL'] / df['DAYS_EMPLOYED'] df['INCOME_TO_BIRTH_RATIO'] = df['AMT_INCOME_TOTAL'] / df['DAYS_BIRTH'] df['ID_TO_BIRTH_RATIO'] = df['DAYS_ID_PUBLISH'] / df['DAYS_BIRTH'] df['CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH'] df['CAR_TO_EMPLOYED_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED'] df['PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH'] df['APPS_EXT_SOURCE_MEAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1) df['APPS_EXT_SOURCE_STD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1) df['APPS_EXT_SOURCE_STD'] = df['APPS_EXT_SOURCE_STD'].fillna(df['APPS_EXT_SOURCE_STD'].mean()) df['APP_SCORE1_TO_BIRTH_RATIO'] = df['EXT_SOURCE_1'] /(df['DAYS_BIRTH'] / 365.25) df['APP_SCORE2_TO_BIRTH_RATIO'] = df['EXT_SOURCE_2'] /(df['DAYS_BIRTH'] / 365.25) df['APP_SCORE3_TO_BIRTH_RATIO'] = df['EXT_SOURCE_3'] /(df['DAYS_BIRTH'] / 365.25) df['APP_SCORE1_TO_EMPLOY_RATIO'] = df['EXT_SOURCE_1'] /(df['DAYS_EMPLOYED'] / 365.25) df['APP_EXT_SOURCE_2*EXT_SOURCE_3*DAYS_BIRTH'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['DAYS_BIRTH'] df['APP_SCORE1_TO_FAM_CNT_RATIO'] = df['EXT_SOURCE_1'] / df['CNT_FAM_MEMBERS'] df['APP_SCORE1_TO_GOODS_RATIO'] = df['EXT_SOURCE_1'] / df['AMT_GOODS_PRICE'] df['APP_SCORE1_TO_CREDIT_RATIO'] = df['EXT_SOURCE_1'] / df['AMT_CREDIT'] df['APP_SCORE1_TO_SCORE2_RATIO'] = df['EXT_SOURCE_1'] / df['EXT_SOURCE_2'] df['APP_SCORE1_TO_SCORE3_RATIO'] = df['EXT_SOURCE_1'] / df['EXT_SOURCE_3'] df['APP_SCORE2_TO_CREDIT_RATIO'] = df['EXT_SOURCE_2'] / df['AMT_CREDIT'] 
df['APP_SCORE2_TO_REGION_RATING_RATIO'] = df['EXT_SOURCE_2'] / df['REGION_RATING_CLIENT'] df['APP_SCORE2_TO_CITY_RATING_RATIO'] = df['EXT_SOURCE_2'] / df['REGION_RATING_CLIENT_W_CITY'] df['APP_SCORE2_TO_POP_RATIO'] = df['EXT_SOURCE_2'] / df['REGION_POPULATION_RELATIVE'] df['APP_SCORE2_TO_PHONE_CHANGE_RATIO'] = df['EXT_SOURCE_2'] / df['DAYS_LAST_PHONE_CHANGE'] df['APP_EXT_SOURCE_1*EXT_SOURCE_2'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] df['APP_EXT_SOURCE_1*EXT_SOURCE_3'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_3'] df['APP_EXT_SOURCE_2*EXT_SOURCE_3'] = df['EXT_SOURCE_2'] * df['EXT_SOURCE_3'] df['APP_EXT_SOURCE_1*DAYS_EMPLOYED'] = df['EXT_SOURCE_1'] * df['DAYS_EMPLOYED'] df['APP_EXT_SOURCE_2*DAYS_EMPLOYED'] = df['EXT_SOURCE_2'] * df['DAYS_EMPLOYED'] df['APP_EXT_SOURCE_3*DAYS_EMPLOYED'] = df['EXT_SOURCE_3'] * df['DAYS_EMPLOYED'] df['APPS_GOODS_INCOME_RATIO'] = df['AMT_GOODS_PRICE'] / df['AMT_INCOME_TOTAL'] df['APPS_CNT_FAM_INCOME_RATIO'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS'] df['APPS_INCOME_EMPLOYED_RATIO'] = df['AMT_INCOME_TOTAL'] / df['DAYS_EMPLOYED'] df['CREDIT_TO_GOODS_RATIO_2'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE'] df['APP_AMT_INCOME_TOTAL_12_AMT_ANNUITY_ratio'] = df['AMT_INCOME_TOTAL'] / 12.- df['AMT_ANNUITY'] df['APP_INCOME_TO_EMPLOYED_RATIO'] = df['AMT_INCOME_TOTAL'] / df['DAYS_EMPLOYED'] df['APP_DAYS_LAST_PHONE_CHANGE_DAYS_EMPLOYED_ratio'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED'] df['APP_DAYS_EMPLOYED_DAYS_BIRTH_diff'] = df['DAYS_EMPLOYED'] - df['DAYS_BIRTH'] print('"Application_Train_Test" final shape:', df.shape) return df
Home Credit Default Risk
16,720,939
tokenizer = Tokenizer(num_words = vocabulary,oov_token = oov,char_level = False) tokenizer.fit_on_texts(list(X_train)) X_train = tokenizer.texts_to_sequences(X_train) X_train = pad_sequences(X_train,maxlen = max_len,padding=padding,truncating = trunc) X_val = tokenizer.texts_to_sequences(X_val) X_val = pad_sequences(X_val,maxlen = max_len,padding=padding,truncating = trunc) X_test = tokenizer.texts_to_sequences(test_sent) X_test = pad_sequences(X_test,maxlen = max_len,padding=padding,truncating = trunc )<import_modules>
def bureau_bb() : bureau = pd.read_csv(r'.. /input/home-credit-default-risk/bureau.csv') bb = pd.read_csv(r'.. /input/home-credit-default-risk/bureau_balance.csv') bureau['CREDIT_DURATION'] = -bureau['DAYS_CREDIT'] + bureau['DAYS_CREDIT_ENDDATE'] bureau['ENDDATE_DIF'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT'] bureau['DEBT_PERCENTAGE'] = bureau['AMT_CREDIT_SUM'] / bureau['AMT_CREDIT_SUM_DEBT'] bureau['DEBT_CREDIT_DIFF'] = bureau['AMT_CREDIT_SUM'] - bureau['AMT_CREDIT_SUM_DEBT'] bureau['CREDIT_TO_ANNUITY_RATIO'] = bureau['AMT_CREDIT_SUM'] / bureau['AMT_ANNUITY'] bureau['BUREAU_CREDIT_FACT_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_ENDDATE_FACT'] bureau['BUREAU_CREDIT_ENDDATE_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE'] bureau['BUREAU_CREDIT_DEBT_RATIO'] = bureau['AMT_CREDIT_SUM_DEBT'] / bureau['AMT_CREDIT_SUM'] bureau['BUREAU_IS_DPD'] = bureau['CREDIT_DAY_OVERDUE'].apply(lambda x: 1 if x > 0 else 0) bureau['BUREAU_IS_DPD_OVER120'] = bureau['CREDIT_DAY_OVERDUE'].apply(lambda x: 1 if x > 120 else 0) bb, bb_cat = one_hot_encoder(bb, nan_as_category) bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category) bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size', 'mean']} for col in bb_cat: bb_aggregations[col] = ['mean'] bb_agg = bb.groupby('SK_ID_BUREAU' ).agg(bb_aggregations) bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist() ]) bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU') num_aggregations = { 'DAYS_CREDIT': ['min', 'max', 'mean', 'var'], 'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'], 'DAYS_CREDIT_UPDATE': ['mean'], 'CREDIT_DAY_OVERDUE': ['max', 'mean', 'min'], 'AMT_CREDIT_MAX_OVERDUE': ['mean', 'max'], 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_OVERDUE': ['mean', 'max', 'sum'], 'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'], 'AMT_ANNUITY': ['max', 'mean', 'sum'], 'CNT_CREDIT_PROLONG': ['sum'], 'MONTHS_BALANCE_MIN': ['min'], 'MONTHS_BALANCE_MAX': ['max'], 'MONTHS_BALANCE_SIZE': ['mean', 'sum'], 'SK_ID_BUREAU': ['count'], 'DAYS_ENDDATE_FACT': ['min', 'max', 'mean'], 'ENDDATE_DIF': ['min', 'max', 'mean'], 'BUREAU_CREDIT_FACT_DIFF': ['min', 'max', 'mean'], 'BUREAU_CREDIT_ENDDATE_DIFF': ['min', 'max', 'mean'], 'BUREAU_CREDIT_DEBT_RATIO': ['min', 'max', 'mean'], 'DEBT_CREDIT_DIFF': ['min', 'max', 'mean'], 'BUREAU_IS_DPD': ['mean', 'sum'], 'BUREAU_IS_DPD_OVER120': ['mean', 'sum'] } cat_aggregations = {} for cat in bureau_cat: cat_aggregations[cat] = ['mean'] for cat in bb_cat: cat_aggregations[cat + "_MEAN"] = ['mean'] bureau_agg = bureau.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations}) bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist() ]) active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1] active_agg = active.groupby('SK_ID_CURR' ).agg(num_aggregations) active_agg.columns = pd.Index(['ACTIVE_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist() ]) bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR') closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1] closed_agg = closed.groupby('SK_ID_CURR' ).agg(num_aggregations) closed_agg.columns = pd.Index(['CLOSED_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist() ]) bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR') print('"Bureau/Bureau Balance" final shape:', bureau_agg.shape) return bureau_agg
Home Credit Default Risk
16,720,939
from keras.models import Sequential from keras.layers import Dense,Bidirectional,Activation,Dropout,LSTM,Embedding from keras.layers.embeddings import Embedding<choose_model_class>
def previous_application() : prev = pd.read_csv(r'.. /input/home-credit-default-risk/previous_application.csv') prev, cat_cols = one_hot_encoder(prev, nan_as_category=True) prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace=True) prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace=True) prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace=True) prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace=True) prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace=True) prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT'] prev['APPLICATION_CREDIT_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_CREDIT'] prev['CREDIT_TO_ANNUITY_RATIO'] = prev['AMT_CREDIT'] / prev['AMT_ANNUITY'] prev['DOWN_PAYMENT_TO_CREDIT'] = prev['AMT_DOWN_PAYMENT'] / prev['AMT_CREDIT'] total_payment = prev['AMT_ANNUITY'] * prev['CNT_PAYMENT'] prev['SIMPLE_INTERESTS'] =(total_payment / prev['AMT_CREDIT'] - 1)/ prev['CNT_PAYMENT'] prev['DAYS_LAST_DUE_DIFF'] = prev['DAYS_LAST_DUE_1ST_VERSION'] - prev['DAYS_LAST_DUE'] prev['PREV_GOODS_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_GOODS_PRICE'] prev['PREV_ANNUITY_APPL_RATIO'] = prev['AMT_ANNUITY']/prev['AMT_APPLICATION'] prev['PREV_GOODS_APPL_RATIO'] = prev['AMT_GOODS_PRICE'] / prev['AMT_APPLICATION'] num_aggregations = { 'AMT_ANNUITY': ['min', 'max', 'mean', 'sum'], 'AMT_APPLICATION': ['min', 'max', 'mean', 'sum'], 'AMT_CREDIT': ['min', 'max', 'mean', 'sum'], 'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'], 'AMT_DOWN_PAYMENT': ['min', 'max', 'mean', 'sum'], 'AMT_GOODS_PRICE': ['min', 'max', 'mean', 'sum'], 'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'], 'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'], 'DAYS_DECISION': ['min', 'max', 'mean'], 'CNT_PAYMENT': ['mean', 'sum'], 'SK_ID_PREV': ['nunique'], 'DAYS_TERMINATION': ['max'], 'CREDIT_TO_ANNUITY_RATIO': ['mean', 'max'], 'APPLICATION_CREDIT_DIFF': ['min', 'max', 'mean', 'sum'], 'DOWN_PAYMENT_TO_CREDIT': ['mean'], 'PREV_GOODS_DIFF': ['mean', 'max', 'sum'], 'PREV_GOODS_APPL_RATIO': ['mean', 'max'], 'DAYS_LAST_DUE_DIFF': ['mean', 'max', 'sum'], 'SIMPLE_INTERESTS': ['mean', 'max'] } cat_aggregations = {} for cat in cat_cols: cat_aggregations[cat] = ['mean'] prev_agg = prev.groupby('SK_ID_CURR' ).agg({**num_aggregations, **cat_aggregations}) prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist() ]) approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1] approved_agg = approved.groupby('SK_ID_CURR' ).agg(num_aggregations) approved_agg.columns = pd.Index(['APPROVED_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist() ]) prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR') refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1] refused_agg = refused.groupby('SK_ID_CURR' ).agg(num_aggregations) refused_agg.columns = pd.Index(['REFUSED_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist() ]) prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR') print('"Previous Applications" final shape:', prev_agg.shape) return prev_agg
Home Credit Default Risk
16,720,939
model = Sequential() model.add(Embedding(vocabulary,embedding_dim,input_length = max_len)) model.add(Bidirectional(LSTM(128, dropout = 0.8, recurrent_dropout=0.8, return_sequences=True))) model.add(Bidirectional(LSTM(128,dropout = 0.5,recurrent_dropout=0.5,return_sequences=False))) model.add(Dense(64,activation='relu')) model.add(Dropout(0.4)) model.add(Dense(5,activation='softmax')) model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy']) model.summary()<choose_model_class>
def pos_cash() : pos = pd.read_csv(r'.. /input/home-credit-default-risk/POS_CASH_balance.csv') pos, cat_cols = one_hot_encoder(pos, nan_as_category=True) pos['LATE_PAYMENT'] = pos['SK_DPD'].apply(lambda x: 1 if x > 0 else 0) pos['POS_IS_DPD'] = pos['SK_DPD'].apply(lambda x: 1 if x > 0 else 0) pos['POS_IS_DPD_UNDER_120'] = pos['SK_DPD'].apply(lambda x: 1 if(x > 0)&(x < 120)else 0) pos['POS_IS_DPD_OVER_120'] = pos['SK_DPD'].apply(lambda x: 1 if x >= 120 else 0) aggregations = { 'MONTHS_BALANCE': ['max', 'mean', 'size', 'min'], 'SK_DPD': ['max', 'mean', 'sum', 'var', 'min'], 'SK_DPD_DEF': ['max', 'mean', 'sum'], 'SK_ID_PREV': ['nunique'], 'LATE_PAYMENT': ['mean'], 'SK_ID_CURR': ['count'], 'CNT_INSTALMENT': ['min', 'max', 'mean', 'sum'], 'CNT_INSTALMENT_FUTURE': ['min', 'max', 'mean', 'sum'], 'POS_IS_DPD': ['mean', 'sum'], 'POS_IS_DPD_UNDER_120': ['mean', 'sum'], 'POS_IS_DPD_OVER_120': ['mean', 'sum'], } for cat in cat_cols: aggregations[cat] = ['mean'] pos_agg = pos.groupby('SK_ID_CURR' ).agg(aggregations) pos_agg.columns = pd.Index(['POS_' + e[0] + "_" + e[1].upper() for e in pos_agg.columns.tolist() ]) pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR' ).size() sort_pos = pos.sort_values(by=['SK_ID_PREV', 'MONTHS_BALANCE']) gp = sort_pos.groupby('SK_ID_PREV') df_pos = pd.DataFrame() df_pos['SK_ID_CURR'] = gp['SK_ID_CURR'].first() df_pos['MONTHS_BALANCE_MAX'] = gp['MONTHS_BALANCE'].max() df_pos['POS_LOAN_COMPLETED_MEAN'] = gp['NAME_CONTRACT_STATUS_Completed'].mean() df_pos['POS_COMPLETED_BEFORE_MEAN'] = gp['CNT_INSTALMENT'].first() - gp['CNT_INSTALMENT'].last() df_pos['POS_COMPLETED_BEFORE_MEAN'] = df_pos.apply(lambda x: 1 if x['POS_COMPLETED_BEFORE_MEAN'] > 0 \ and x['POS_LOAN_COMPLETED_MEAN'] > 0 else 0, axis=1) df_pos['POS_REMAINING_INSTALMENTS'] = gp['CNT_INSTALMENT_FUTURE'].last() df_pos['POS_REMAINING_INSTALMENTS_RATIO'] = gp['CNT_INSTALMENT_FUTURE'].last() /gp['CNT_INSTALMENT'].last() df_gp = df_pos.groupby('SK_ID_CURR' ).sum().reset_index() df_gp.drop(['MONTHS_BALANCE_MAX'], axis=1, inplace= True) pos_agg = pd.merge(pos_agg, df_gp, on= 'SK_ID_CURR', how= 'left') pos = do_sum(pos, ['SK_ID_PREV'], 'LATE_PAYMENT', 'LATE_PAYMENT_SUM') last_month_df = pos.groupby('SK_ID_PREV')['MONTHS_BALANCE'].idxmax() sort_pos = pos.sort_values(by=['SK_ID_PREV', 'MONTHS_BALANCE']) gp = sort_pos.iloc[last_month_df].groupby('SK_ID_CURR' ).tail(3) gp_mean = gp.groupby('SK_ID_CURR' ).mean().reset_index() pos_agg = pd.merge(pos_agg, gp_mean[['SK_ID_CURR', 'LATE_PAYMENT_SUM']], on='SK_ID_CURR', how='left') print('"Pos-Cash" balance final shape:', pos_agg.shape) return pos_agg
Home Credit Default Risk
16,720,939
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'] )<train_model>
def installment():
    ins = pd.read_csv(r'../input/home-credit-default-risk/installments_payments.csv')
    ins, cat_cols = one_hot_encoder(ins, nan_as_category=True)
    ins = do_sum(ins, ['SK_ID_PREV', 'NUM_INSTALMENT_NUMBER'], 'AMT_PAYMENT', 'AMT_PAYMENT_GROUPED')
    ins['PAYMENT_DIFFERENCE'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT_GROUPED']
    ins['PAYMENT_RATIO'] = ins['AMT_INSTALMENT'] / ins['AMT_PAYMENT_GROUPED']
    ins['PAID_OVER_AMOUNT'] = ins['AMT_PAYMENT'] - ins['AMT_INSTALMENT']
    ins['PAID_OVER'] = (ins['PAID_OVER_AMOUNT'] > 0).astype(int)
    ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
    ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
    ins['DPD_diff'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
    ins['DBD_diff'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
    ins['DPD'] = ins['DPD_diff'].apply(lambda x: x if x > 0 else 0)
    ins['DBD'] = ins['DBD_diff'].apply(lambda x: x if x > 0 else 0)
    # note: flags positive DBD (paid before due) as written in the source; DPD would flag genuinely late payments
    ins['LATE_PAYMENT'] = ins['DBD'].apply(lambda x: 1 if x > 0 else 0)
    ins['INSTALMENT_PAYMENT_RATIO'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
    ins['LATE_PAYMENT_RATIO'] = ins.apply(lambda x: x['INSTALMENT_PAYMENT_RATIO'] if x['LATE_PAYMENT'] == 1 else 0, axis=1)
    ins['SIGNIFICANT_LATE_PAYMENT'] = ins['LATE_PAYMENT_RATIO'].apply(lambda x: 1 if x > 0.05 else 0)
    ins['DPD_7'] = ins['DPD'].apply(lambda x: 1 if x >= 7 else 0)
    ins['DPD_15'] = ins['DPD'].apply(lambda x: 1 if x >= 15 else 0)
    ins['INS_IS_DPD_UNDER_120'] = ins['DPD'].apply(lambda x: 1 if (x > 0) & (x < 120) else 0)
    ins['INS_IS_DPD_OVER_120'] = ins['DPD'].apply(lambda x: 1 if x >= 120 else 0)
    aggregations = {
        'NUM_INSTALMENT_VERSION': ['nunique'],
        'DPD': ['max', 'mean', 'sum', 'var'],
        'DBD': ['max', 'mean', 'sum', 'var'],
        'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],
        'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],
        'AMT_INSTALMENT': ['max', 'mean', 'sum', 'min'],
        'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
        'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum', 'min'],
        'SK_ID_PREV': ['size', 'nunique'],
        'PAYMENT_DIFFERENCE': ['mean'],
        'PAYMENT_RATIO': ['mean', 'max'],
        'LATE_PAYMENT': ['mean', 'sum'],
        'SIGNIFICANT_LATE_PAYMENT': ['mean', 'sum'],
        'LATE_PAYMENT_RATIO': ['mean'],
        'DPD_7': ['mean'],
        'DPD_15': ['mean'],
        'PAID_OVER': ['mean'],
        'DPD_diff': ['mean', 'min', 'max'],
        'DBD_diff': ['mean', 'min', 'max'],
        'DAYS_INSTALMENT': ['mean', 'max', 'sum'],
        'INS_IS_DPD_UNDER_120': ['mean', 'sum'],
        'INS_IS_DPD_OVER_120': ['mean', 'sum']
    }
    for cat in cat_cols:
        aggregations[cat] = ['mean']
    ins_agg = ins.groupby('SK_ID_CURR').agg(aggregations)
    ins_agg.columns = pd.Index(['INSTAL_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist()])
    ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR').size()
    # repeat a subset of the aggregations on the last 365 days of payments only
    cond_day = ins['DAYS_ENTRY_PAYMENT'] >= -365
    ins_d365_grp = ins[cond_day].groupby('SK_ID_CURR')
    ins_d365_agg_dict = {
        'SK_ID_CURR': ['count'],
        'NUM_INSTALMENT_VERSION': ['nunique'],
        'DAYS_ENTRY_PAYMENT': ['mean', 'max', 'sum'],
        'DAYS_INSTALMENT': ['mean', 'max', 'sum'],
        'AMT_INSTALMENT': ['mean', 'max', 'sum'],
        'AMT_PAYMENT': ['mean', 'max', 'sum'],
        'PAYMENT_DIFF': ['mean', 'min', 'max', 'sum'],
        'PAYMENT_PERC': ['mean', 'max'],
        'DPD_diff': ['mean', 'min', 'max'],
        'DPD': ['mean', 'sum'],
        'INS_IS_DPD_UNDER_120': ['mean', 'sum'],
        'INS_IS_DPD_OVER_120': ['mean', 'sum']}
    ins_d365_agg = ins_d365_grp.agg(ins_d365_agg_dict)
    ins_d365_agg.columns = ['INS_D365' + ('_').join(column).upper() for column in ins_d365_agg.columns.ravel()]
    ins_agg = ins_agg.merge(ins_d365_agg, on='SK_ID_CURR', how='left')
    print('"Installments Payments" final shape:', ins_agg.shape)
    return ins_agg
Home Credit Default Risk
16,720,939
hist_model = model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=4, batch_size=256, verbose=1)<define_variables>
def credit_card():
    cc = pd.read_csv(r'../input/home-credit-default-risk/credit_card_balance.csv')
    cc, cat_cols = one_hot_encoder(cc, nan_as_category=True)
    cc['LIMIT_USE'] = cc['AMT_BALANCE'] / cc['AMT_CREDIT_LIMIT_ACTUAL']
    cc['PAYMENT_DIV_MIN'] = cc['AMT_PAYMENT_CURRENT'] / cc['AMT_INST_MIN_REGULARITY']
    cc['LATE_PAYMENT'] = cc['SK_DPD'].apply(lambda x: 1 if x > 0 else 0)
    cc['DRAWING_LIMIT_RATIO'] = cc['AMT_DRAWINGS_ATM_CURRENT'] / cc['AMT_CREDIT_LIMIT_ACTUAL']
    cc['CARD_IS_DPD_UNDER_120'] = cc['SK_DPD'].apply(lambda x: 1 if (x > 0) & (x < 120) else 0)
    cc['CARD_IS_DPD_OVER_120'] = cc['SK_DPD'].apply(lambda x: 1 if x >= 120 else 0)
    cc_agg = cc.groupby('SK_ID_CURR').agg(['min', 'max', 'mean', 'sum', 'var'])
    cc_agg.columns = pd.Index(['CC_' + e[0] + "_" + e[1].upper() for e in cc_agg.columns.tolist()])
    cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR').size()
    # aggregate only the latest balance snapshot of each previous credit card
    last_ids = cc.groupby('SK_ID_PREV')['MONTHS_BALANCE'].idxmax()
    last_months_df = cc[cc.index.isin(last_ids)]
    cc_agg = group_and_merge(last_months_df, cc_agg, 'CC_LAST_', {'AMT_BALANCE': ['mean', 'max']})
    CREDIT_CARD_TIME_AGG = {
        'AMT_BALANCE': ['mean', 'max'],
        # the source listed 'LIMIT_USE' twice (['max', 'mean'] first, then ['min', 'max']);
        # in a dict literal the later entry wins, so only ['min', 'max'] takes effect
        'LIMIT_USE': ['min', 'max'],
        'AMT_CREDIT_LIMIT_ACTUAL': ['max'],
        'AMT_DRAWINGS_ATM_CURRENT': ['max', 'sum'],
        'AMT_DRAWINGS_CURRENT': ['max', 'sum'],
        'AMT_DRAWINGS_POS_CURRENT': ['max', 'sum'],
        'AMT_INST_MIN_REGULARITY': ['max', 'mean'],
        'AMT_PAYMENT_TOTAL_CURRENT': ['max', 'sum'],
        'AMT_TOTAL_RECEIVABLE': ['max', 'mean'],
        'CNT_DRAWINGS_ATM_CURRENT': ['max', 'sum', 'mean'],
        'CNT_DRAWINGS_CURRENT': ['max', 'mean', 'sum'],
        'CNT_DRAWINGS_POS_CURRENT': ['mean'],
        'SK_DPD': ['mean', 'max', 'sum'],
        'DRAWING_LIMIT_RATIO': ['min', 'max'],
        'LATE_PAYMENT': ['mean', 'sum'],
        'CARD_IS_DPD_UNDER_120': ['mean', 'sum'],
        'CARD_IS_DPD_OVER_120': ['mean', 'sum']
    }
    for months in [12, 24, 48]:
        cc_prev_id = cc[cc['MONTHS_BALANCE'] >= -months]['SK_ID_PREV'].unique()
        cc_recent = cc[cc['SK_ID_PREV'].isin(cc_prev_id)]
        prefix = 'INS_{}M_'.format(months)  # prefix says 'INS_' in the source even though this is the credit-card table
        cc_agg = group_and_merge(cc_recent, cc_agg, prefix, CREDIT_CARD_TIME_AGG)
    print('"Credit Card Balance" final shape:', cc_agg.shape)
    return cc_agg
Home Credit Default Risk
16,720,939
test_id = test_data['PhraseId']
test_id<save_to_csv>
def data_post_processing(dataframe):
    print(f'---=> the DATA POST-PROCESSING is beginning, the dataset has {dataframe.shape[1]} features')
    index_cols = ['TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index']
    dataframe = dataframe.rename(columns=lambda x: re.sub('[^A-Za-z0-9_]+', '_', x))
    print('feature names are sanitized')
    dataframe = reduce_mem_usage(dataframe)
    print('---=> pandas dtypes of the features are downcast for reduced memory usage')
    noninformative_cols = []
    for col in dataframe.columns:
        if len(dataframe[col].value_counts()) < 2:
            noninformative_cols.append(col)
    dataframe.drop(noninformative_cols, axis=1, inplace=True)
    print(f'---=> {dataframe.shape[1]} features remain after removing non-informative features')
    feature_num = dataframe.shape[1]
    auc_limit = 0.7
    all_features = dataframe.columns.tolist()
    selected_feature_df = pd.read_csv('../input/homecredit-best-subs/removed_cols_lgbm.csv')
    selected_features = selected_feature_df.removed_cols.tolist()
    remained_features = set(all_features).difference(set(selected_features))
    dataframe = dataframe[list(remained_features)]  # list() added: a set is not a valid column indexer in recent pandas
    print(f'{feature_num - dataframe.shape[1]} features were eliminated by the LightGBM classifier with an {auc_limit} AUC score limit in step I')
    print(f'---=> {dataframe.shape[1]} features remain after removing features not interesting to the LightGBM classifier')
    start_feats_num = dataframe.shape[1]
    cat_cols = [col for col in dataframe.columns
                if 3 < len(dataframe[col].value_counts()) < 20 and col not in index_cols]
    dataframe, _ = risk_groupanizer(dataframe, column_names=cat_cols, upper_limit_ratio=8.1, lower_limit_ratio=8.1)
    print(f'---=> {dataframe.shape[1] - start_feats_num} features were generated with the risk_groupanizer')
    print(f'---=> the DATA POST-PROCESSING has ended! The dataset now has a total of {dataframe.shape[1]} features')
    gc.collect()
    return dataframe
Home Credit Default Risk
16,720,939
y_pred = np.argmax(model.predict(X_test), axis=-1)
submission_df = pd.DataFrame({'PhraseId': test_id, 'Sentiment': y_pred})
submission_df.to_csv('submission.csv', index=False)
submission_df.head()<import_modules>
def Kfold_LightGBM(df):
    # in the source, this separator print was fused with the following read_csv during extraction
    print('===============================================')
    df_subx = pd.read_csv(r'../input/homecredit-best-subs/df_subs_3.csv')
    df_sub = df_subx[['SK_ID_CURR', '23']]
    df_sub.columns = ['SK_ID_CURR', 'TARGET']
    train_df = df[df['TARGET'].notnull()]
    test_df = df[df['TARGET'].isnull()]
    del df
    gc.collect()
    # pseudo-labelling: an earlier submission's confident predictions (> 0.75) become hard labels,
    # and the pseudo-labelled test rows are appended to the training set three times, as in the source
    test_df.TARGET = np.where(df_sub.TARGET > 0.75, 1, 0)
    train_df = pd.concat([train_df, test_df], axis=0)
    train_df = pd.concat([train_df, test_df], axis=0)
    train_df = pd.concat([train_df, test_df], axis=0)
    print(f'Train shape: {train_df.shape}, test shape: {test_df.shape} are loaded.')
    folds = KFold(n_splits=5, shuffle=True, random_state=2020)
    oof_preds = np.zeros(train_df.shape[0])
    sub_preds = np.zeros(test_df.shape[0])
    feats = [f for f in train_df.columns if f not in ['TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV']]
    print(f'only {len(feats)} features from a total of {train_df.shape[1]} features are used for the ML analysis')
    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):
        train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
        valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
        clf = LGBMClassifier(nthread=-1, n_estimators=5000, learning_rate=0.01, max_depth=11,
                             num_leaves=58, colsample_bytree=0.613, subsample=0.708, max_bin=407,
                             reg_alpha=3.564, reg_lambda=4.930, min_child_weight=6,
                             min_child_samples=165, silent=-1, verbose=-1)
        clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)],
                eval_metric='auc', verbose=500, early_stopping_rounds=500)
        oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]
        sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
        print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
        del clf, train_x, train_y, valid_x, valid_y
        gc.collect()
    print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))
    test_df['TARGET'] = sub_preds
    test_df[['SK_ID_CURR', 'TARGET']].to_csv('submission.csv', index=False)
    print('a submission file is created')
Home Credit Default Risk
16,720,939
import os
import zipfile
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.applications.efficientnet import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping  # used by the callbacks cell below; missing from the source imports
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt<define_variables>
df = application()
df = df.merge(bureau_bb(), how='left', on='SK_ID_CURR')
print('--=> df after merge with bureau:', df.shape)
df = df.merge(previous_application(), how='left', on='SK_ID_CURR')
print('--=> df after merge with previous application:', df.shape)
df = df.merge(pos_cash(), how='left', on='SK_ID_CURR')
print('--=> df after merge with pos cash:', df.shape)
df = df.merge(installment(), how='left', on='SK_ID_CURR')
print('--=> df after merge with installments:', df.shape)
df = df.merge(credit_card(), how='left', on='SK_ID_CURR')
print('--=> df after merge with credit card:', df.shape)
df = data_post_processing(df)
print('=' * 50)
print('---=> df final shape:', df.shape, '<=---')
print('=' * 50)
Kfold_LightGBM(df)
print('--=> all calculations are done!! <=--')
Home Credit Default Risk
16,374,355
TRAIN_PATH = ".. /input/dogs-vs-cats-redux-kernels-edition/train.zip" TEST_PATH = ".. /input/dogs-vs-cats-redux-kernels-edition/test.zip" UNZIP_DATA = ".. /kaggle/files/unzipped/" UNZIP_TRAIN = ".. /kaggle/files/unzipped/train" UNZIP_TEST = ".. /kaggle/files/unzipped/test" BATCH_SIZE = 32 SEED = 88888 IMG_SIZE = 224 EPOCHS = 10 <load_pretrained>
avg_bleand_1 = pd.DataFrame()
avg_bleand_1['SK_ID_CURR'] = df_subs['SK_ID_CURR']
avg_bleand_1['TARGET'] = 1.0 * (6 * (df_subs['0'] + df_subs['1'] + df_subs['2'] + df_subs['3'] + 2 * df_subs['4']) / 6
                                + 3 * (5 * df_subs['5'] + 7 * df_subs['6'] + 1 * df_subs['14'] + 3 * df_subs['19'] + 2 * df_subs['20'] + 4 * df_subs['21']) / 22
                                + 25 * (df_subs['7'] + df_subs['8'] + df_subs['9'] + df_subs['10'] + df_subs['11'] + df_subs['12'] + 2 * df_subs['13'] + 4 * df_subs['15'] + 2 * df_subs['16'] + 2 * df_subs['17'] + 4 * df_subs['18']) / 20) / 34
avg_bleand_1.head()
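The nested weights are easier to trust once checked: each inner group is a weighted mean whose weights sum to its own divisor (6, 22, 20), and the outer weights 6 + 3 + 25 equal the final divisor 34, so the blend stays a convex combination of submission probabilities. A minimal sanity check (not in the source):
inner_groups = [([1, 1, 1, 1, 2], 6),
                ([5, 7, 1, 3, 2, 4], 22),
                ([1, 1, 1, 1, 1, 1, 2, 4, 2, 2, 4], 20)]
outer = [6, 3, 25]
assert all(sum(w) == d for w, d in inner_groups)  # each group is a proper weighted mean
assert sum(outer) == 34                           # outer weights match the final divisor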
Home Credit Default Risk
16,374,355
with zipfile.ZipFile(TRAIN_PATH, 'r') as zipp:
    zipp.extractall(UNZIP_DATA)
    print('Done!')
with zipfile.ZipFile(TEST_PATH, 'r') as zipp:
    zipp.extractall(UNZIP_DATA)
    print('Done!')<define_variables>
avg_bleand_1.to_csv('submission_806.csv', index=False)
Home Credit Default Risk
16,374,355
training_images_files = os.listdir("../kaggle/files/unzipped/train")
test_image_files = os.listdir("../kaggle/files/unzipped/test")<feature_engineering>
df_subs['25'] = avg_bleand_1['TARGET']
Home Credit Default Risk
16,374,355
<feature_engineering><EOS>
avg_bleand_2 = avg_bleand_1.copy()
# weighted blend of three submissions with a negative weight on the average blend ('25');
# note the weights sum to 30 while the divisor is 31, slightly shrinking the output
avg_bleand_2['TARGET'] = 1.0 * (7 * df_subs['22'] + 12 * df_subs['23'] + 16 * df_subs['24'] - 5 * df_subs['25']) / 31
avg_bleand_2.to_csv('submission_final.csv', index=False)
Home Credit Default Risk
19,576,670
<SOS> metric: AUC Kaggle data source: home-credit-default-risk<split>
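The competition is scored on ROC AUC, which scikit-learn computes directly; a minimal sketch with made-up labels and scores, not taken from any kernel above:
from sklearn.metrics import roc_auc_score

y_true = [0, 0, 1, 1]            # hypothetical ground-truth labels
y_score = [0.1, 0.4, 0.35, 0.8]  # hypothetical predicted probabilities
print(roc_auc_score(y_true, y_score))  # 0.75: 3 of 4 positive/negative pairs are ranked correctly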
import warnings  # not imported in any earlier cell shown for this kernel
warnings.simplefilter(action='ignore', category=FutureWarning)
Home Credit Default Risk
19,576,670
train_df, valid_df = train_test_split(train_df, test_size=.2, shuffle=True, random_state=SEED)<choose_model_class>
DATA_DIRECTORY = "../input/home-credit-default-risk"
Home Credit Default Risk
19,576,670
train_generator = ImageDataGenerator(preprocessing_function=preprocess_input,
                                     rotation_range=45,
                                     shear_range=0.1,
                                     zoom_range=0.2,
                                     horizontal_flip=False,
                                     width_shift_range=0.1,
                                     height_shift_range=0.1)
train_generator = train_generator.flow_from_dataframe(
    train_df,
    UNZIP_TRAIN,
    x_col='filename',
    y_col='class',
    target_size=(IMG_SIZE, IMG_SIZE),
    batch_size=32,
    class_mode='binary'
)
validation_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
validation_generator = validation_generator.flow_from_dataframe(
    valid_df,
    UNZIP_TRAIN,
    x_col='filename',
    y_col='class',
    target_size=(IMG_SIZE, IMG_SIZE),
    batch_size=32,
    class_mode='binary'
)<choose_model_class>
df_train = pd.read_csv(os.path.join(DATA_DIRECTORY, 'application_train.csv'))
df_test = pd.read_csv(os.path.join(DATA_DIRECTORY, 'application_test.csv'))
df = df_train.append(df_test)  # use pd.concat([df_train, df_test]) on newer pandas, where DataFrame.append is removed
del df_train, df_test
gc.collect()
Home Credit Default Risk
19,576,670
pre_trained_model = EfficientNetB0(input_shape=(IMG_SIZE, IMG_SIZE, 3),
                                   include_top=False,
                                   weights='imagenet')
for layer in pre_trained_model.layers:
    layer.trainable = False<choose_model_class>
df = df[df['AMT_INCOME_TOTAL'] < 20000000]
df = df[df['CODE_GENDER'] != 'XNA']
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
df['DAYS_LAST_PHONE_CHANGE'].replace(0, np.nan, inplace=True)
Home Credit Default Risk
19,576,670
last_layer = pre_trained_model.get_layer('top_activation')
last_output = last_layer.output

def create_model(last_output):
    x = keras.layers.GlobalAveragePooling2D()(last_output)
    x = keras.layers.BatchNormalization()(x)
    x = keras.layers.Dense(1, activation='sigmoid')(x)
    model = Model(pre_trained_model.input, x)
    model.compile(optimizer=RMSprop(learning_rate=0.001),
                  loss='binary_crossentropy',
                  metrics=['acc'])
    return model

model = create_model(last_output)<init_hyperparams>
def get_age_group(days_birth):
    age_years = -days_birth / 365
    if age_years < 27:
        return 1
    elif age_years < 40:
        return 2
    elif age_years < 50:
        return 3
    elif age_years < 65:
        return 4
    elif age_years < 99:
        return 5
    else:
        return 0
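Since DAYS_BIRTH is stored as a negative day count in this dataset, a quick sanity check of the banding with illustrative values:
assert get_age_group(-35 * 365) == 2  # 27 <= 35 < 40
assert get_age_group(-70 * 365) == 5  # 65 <= 70 < 99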
Home Credit Default Risk
19,576,670
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
                                            patience=2,
                                            verbose=2,
                                            factor=0.5,
                                            min_delta=0.001,  # the source's 'min_delt' is a typo and would raise a TypeError
                                            min_lr=0.00001)
early_stopping = EarlyStopping(
    monitor="val_acc",  # the model is compiled with metrics=['acc'], so the source's 'val_accuracy' would never be found
    patience=50,
    verbose=2,
    mode="max",
)<train_model>
docs = [f for f in df.columns if 'FLAG_DOC' in f]
df['DOCUMENT_COUNT'] = df[docs].sum(axis=1)
df['NEW_DOC_KURT'] = df[docs].kurtosis(axis=1)
df['AGE_RANGE'] = df['DAYS_BIRTH'].apply(lambda x: get_age_group(x))
Home Credit Default Risk
19,576,670
history = model.fit(
    train_generator,
    validation_data=validation_generator,
    epochs=EPOCHS,
    callbacks=[learning_rate_reduction, early_stopping],
)<save_model>
df['EXT_SOURCES_PROD'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']
df['EXT_SOURCES_WEIGHTED'] = df.EXT_SOURCE_1 * 2 + df.EXT_SOURCE_2 * 1 + df.EXT_SOURCE_3 * 3
# pattern spaces restored so it matches NumPy's actual warning text;
# np.warnings is an alias of the warnings module (removed in NumPy >= 1.24)
np.warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
for function_name in ['min', 'max', 'mean', 'nanmedian', 'var']:
    feature_name = 'EXT_SOURCES_{}'.format(function_name.upper())
    df[feature_name] = eval('np.{}'.format(function_name))(
        df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']], axis=1)
Home Credit Default Risk
19,576,670
model.save('./dog_cat_model')<predict_on_test>
df['CREDIT_TO_ANNUITY_RATIO'] = df['AMT_CREDIT'] / df['AMT_ANNUITY']
df['CREDIT_TO_GOODS_RATIO'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE']
df['ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
df['CREDIT_TO_INCOME_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
df['INCOME_TO_EMPLOYED_RATIO'] = df['AMT_INCOME_TOTAL'] / df['DAYS_EMPLOYED']
df['INCOME_TO_BIRTH_RATIO'] = df['AMT_INCOME_TOTAL'] / df['DAYS_BIRTH']
df['EMPLOYED_TO_BIRTH_RATIO'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['ID_TO_BIRTH_RATIO'] = df['DAYS_ID_PUBLISH'] / df['DAYS_BIRTH']
df['CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']
df['CAR_TO_EMPLOYED_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
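Worth keeping in mind when reading these features: the DAYS_* inputs are negative day counts, so a ratio of two DAYS_* columns is positive while a mixed ratio flips sign. A small illustrative check with made-up values:
row = {'DAYS_EMPLOYED': -2000, 'DAYS_BIRTH': -12000, 'OWN_CAR_AGE': 5}
assert row['DAYS_EMPLOYED'] / row['DAYS_BIRTH'] > 0  # EMPLOYED_TO_BIRTH_RATIO comes out positive
assert row['OWN_CAR_AGE'] / row['DAYS_BIRTH'] < 0    # CAR_TO_BIRTH_RATIO comes out negative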
Home Credit Default Risk
19,576,670
test_gen = ImageDataGenerator(preprocessing_function=preprocess_input)
test_generator = test_gen.flow_from_dataframe(
    test_df,
    UNZIP_TEST,
    x_col='filename',
    class_mode=None,
    target_size=(IMG_SIZE, IMG_SIZE),
    batch_size=BATCH_SIZE,
    shuffle=False
)
predict = model.predict(test_generator, verbose=1)<feature_engineering>
def do_mean(df, group_cols, counted, agg_name):
    gp = df[group_cols + [counted]].groupby(group_cols)[counted].mean().reset_index().rename(
        columns={counted: agg_name})
    df = df.merge(gp, on=group_cols, how='left')
    del gp
    gc.collect()
    return df
Home Credit Default Risk
19,576,670
test_df["predict"] = predict test_df["label"] = test_df["predict"] result = test_df[["id", "label"]]<save_to_csv>
def do_median(df, group_cols, counted, agg_name):
    gp = df[group_cols + [counted]].groupby(group_cols)[counted].median().reset_index().rename(
        columns={counted: agg_name})
    df = df.merge(gp, on=group_cols, how='left')
    del gp
    gc.collect()
    return df
Home Credit Default Risk
19,576,670
result.to_csv('submission.csv', index=False)<import_modules>
def do_std(df, group_cols, counted, agg_name):
    gp = df[group_cols + [counted]].groupby(group_cols)[counted].std().reset_index().rename(
        columns={counted: agg_name})
    df = df.merge(gp, on=group_cols, how='left')
    del gp
    gc.collect()
    return df
Home Credit Default Risk
19,576,670
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))<load_from_csv>
def do_sum(df, group_cols, counted, agg_name):
    gp = df[group_cols + [counted]].groupby(group_cols)[counted].sum().reset_index().rename(
        columns={counted: agg_name})
    df = df.merge(gp, on=group_cols, how='left')
    del gp
    gc.collect()
    return df
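All four do_* helpers share one pattern: aggregate `counted` within `group_cols`, then broadcast the statistic back onto every row via a left merge. A minimal sketch on toy data (hypothetical values, not from the competition files):
toy = pd.DataFrame({'OCCUPATION_TYPE': ['A', 'A', 'B'],
                    'AMT_INCOME_TOTAL': [100, 300, 50]})
toy = do_mean(toy, ['OCCUPATION_TYPE'], 'AMT_INCOME_TOTAL', 'GROUP_INCOME_MEAN')
print(toy['GROUP_INCOME_MEAN'].tolist())  # [200.0, 200.0, 50.0]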
Home Credit Default Risk
19,576,670
df_train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv") df_test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv" )<feature_engineering>
group = ['ORGANIZATION_TYPE', 'NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'AGE_RANGE', 'CODE_GENDER']
df = do_median(df, group, 'EXT_SOURCES_MEAN', 'GROUP_EXT_SOURCES_MEDIAN')
df = do_std(df, group, 'EXT_SOURCES_MEAN', 'GROUP_EXT_SOURCES_STD')
df = do_mean(df, group, 'AMT_INCOME_TOTAL', 'GROUP_INCOME_MEAN')
df = do_std(df, group, 'AMT_INCOME_TOTAL', 'GROUP_INCOME_STD')
df = do_mean(df, group, 'CREDIT_TO_ANNUITY_RATIO', 'GROUP_CREDIT_TO_ANNUITY_MEAN')
df = do_std(df, group, 'CREDIT_TO_ANNUITY_RATIO', 'GROUP_CREDIT_TO_ANNUITY_STD')
df = do_mean(df, group, 'AMT_CREDIT', 'GROUP_CREDIT_MEAN')
df = do_mean(df, group, 'AMT_ANNUITY', 'GROUP_ANNUITY_MEAN')
df = do_std(df, group, 'AMT_ANNUITY', 'GROUP_ANNUITY_STD')
Home Credit Default Risk
19,576,670
def format_keyword(df):
    df["keyword"] = df["keyword"].fillna(".")
    df["keyword"] = df.keyword.str.replace("%20", " ")<count_values>
def label_encoder(df, categorical_columns=None):
    if not categorical_columns:
        categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
    for col in categorical_columns:
        df[col], uniques = pd.factorize(df[col])
    return df, categorical_columns
Home Credit Default Risk
19,576,670
df_train.loc[df_train.target == 0]["keyword"].value_counts()<string_transform>
def drop_application_columns(df):
    drop_list = [
        'CNT_CHILDREN', 'CNT_FAM_MEMBERS', 'HOUR_APPR_PROCESS_START',
        'FLAG_EMP_PHONE', 'FLAG_MOBIL', 'FLAG_CONT_MOBILE', 'FLAG_EMAIL', 'FLAG_PHONE',
        'FLAG_OWN_REALTY', 'REG_REGION_NOT_LIVE_REGION', 'REG_REGION_NOT_WORK_REGION',
        'REG_CITY_NOT_WORK_CITY', 'OBS_30_CNT_SOCIAL_CIRCLE', 'OBS_60_CNT_SOCIAL_CIRCLE',
        'AMT_REQ_CREDIT_BUREAU_DAY', 'AMT_REQ_CREDIT_BUREAU_MON', 'AMT_REQ_CREDIT_BUREAU_YEAR',
        'COMMONAREA_MODE', 'NONLIVINGAREA_MODE', 'ELEVATORS_MODE', 'NONLIVINGAREA_AVG',
        'FLOORSMIN_MEDI', 'LANDAREA_MODE', 'NONLIVINGAREA_MEDI', 'LIVINGAPARTMENTS_MODE',
        'FLOORSMIN_AVG', 'LANDAREA_AVG', 'FLOORSMIN_MODE', 'LANDAREA_MEDI',
        'COMMONAREA_MEDI', 'YEARS_BUILD_AVG', 'COMMONAREA_AVG', 'BASEMENTAREA_AVG',
        'BASEMENTAREA_MODE', 'NONLIVINGAPARTMENTS_MEDI', 'BASEMENTAREA_MEDI',
        'LIVINGAPARTMENTS_AVG', 'ELEVATORS_AVG', 'YEARS_BUILD_MEDI', 'ENTRANCES_MODE',
        'NONLIVINGAPARTMENTS_MODE', 'LIVINGAREA_MODE', 'LIVINGAPARTMENTS_MEDI',
        'YEARS_BUILD_MODE', 'YEARS_BEGINEXPLUATATION_AVG', 'ELEVATORS_MEDI', 'LIVINGAREA_MEDI',
        'YEARS_BEGINEXPLUATATION_MODE', 'NONLIVINGAPARTMENTS_AVG', 'HOUSETYPE_MODE',
        'FONDKAPREMONT_MODE', 'EMERGENCYSTATE_MODE'
    ]
    # drop most FLAG_DOCUMENT_* columns, keeping 3, 8 and 18
    for doc_num in [2, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 19, 20, 21]:
        drop_list.append('FLAG_DOCUMENT_{}'.format(doc_num))
    df.drop(drop_list, axis=1, inplace=True)
    return df
Home Credit Default Risk
19,576,670
df_count = df_train.text.str.split().str.len()
max(df_count)<categorify>
df, le_encoded_cols = label_encoder(df, None)
df = drop_application_columns(df)
Home Credit Default Risk
19,576,670
import re
import string

def process_text(text):
    # reconstruction of a cell garbled in extraction: the source chained several re.sub
    # calls, one of which was truncated mid-pattern; the whitespace normalization and
    # hashtag handling below are assumptions, not the author's verified code
    text = text.replace("\n", " ")
    text = re.sub(r'@\S+', '', text)                   # strip @mentions
    text = re.sub(r'#', '', text)                      # assumed: strip '#' from hashtags (original pattern was cut off)
    text = re.sub(r'https?://\S+|www\.\S+', '', text)  # strip URLs
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)  # strip punctuation
    return text<feature_engineering>
df = pd.get_dummies(df)
Home Credit Default Risk
19,576,670
df_train["text"] = df_train.text.transform(lambda x: process_text(x)) df_test["text"] = df_test.text.transform(lambda x: process_text(x))<categorify>
bureau = pd.read_csv(os.path.join(DATA_DIRECTORY, 'bureau.csv'))
Home Credit Default Risk
19,576,670
df_train["appears"]=df_train.groupby("text" ).text.transform("count" )<feature_engineering>
bureau['CREDIT_DURATION'] = -bureau['DAYS_CREDIT'] + bureau['DAYS_CREDIT_ENDDATE']
bureau['ENDDATE_DIF'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT']
bureau['DEBT_PERCENTAGE'] = bureau['AMT_CREDIT_SUM'] / bureau['AMT_CREDIT_SUM_DEBT']
bureau['DEBT_CREDIT_DIFF'] = bureau['AMT_CREDIT_SUM'] - bureau['AMT_CREDIT_SUM_DEBT']
bureau['CREDIT_TO_ANNUITY_RATIO'] = bureau['AMT_CREDIT_SUM'] / bureau['AMT_ANNUITY']
Home Credit Default Risk
19,576,670
df_train["target_std"]=df_train.groupby("text" ).target.transform(np.std) df_train["target_mean"]=df_train.groupby("text" ).target.transform(np.mean )<sort_values>
def one_hot_encoder(df, categorical_columns=None, nan_as_category=True):
    original_columns = list(df.columns)
    if not categorical_columns:
        categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
    df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
    categorical_columns = [c for c in df.columns if c not in original_columns]
    return df, categorical_columns
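The helper returns both the dummified frame and the list of newly created columns, which later cells aggregate with 'mean' to turn categories into per-client frequencies. A small sketch on a toy column (hypothetical data):
toy = pd.DataFrame({'CREDIT_ACTIVE': ['Active', 'Closed', None]})
toy, new_cols = one_hot_encoder(toy, nan_as_category=True)
print(new_cols)  # ['CREDIT_ACTIVE_Active', 'CREDIT_ACTIVE_Closed', 'CREDIT_ACTIVE_nan']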
Home Credit Default Risk
19,576,670
duplicate_ids = df_train.loc[df_train.target_std > 0].sort_values(by=["appears", "text"], ascending=False).index<drop_column>
def group(df_to_agg, prefix, aggregations, aggregate_by='SK_ID_CURR'):
    agg_df = df_to_agg.groupby(aggregate_by).agg(aggregations)
    agg_df.columns = pd.Index(['{}{}_{}'.format(prefix, e[0], e[1].upper())
                               for e in agg_df.columns.tolist()])
    return agg_df.reset_index()
Home Credit Default Risk
19,576,670
df_train = df_train.drop(index=duplicate_ids)<remove_duplicates>
def group_and_merge(df_to_agg, df_to_merge, prefix, aggregations, aggregate_by='SK_ID_CURR'):
    agg_df = group(df_to_agg, prefix, aggregations, aggregate_by=aggregate_by)
    return df_to_merge.merge(agg_df, how='left', on=aggregate_by)
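group() flattens the MultiIndex that DataFrame.agg produces into prefixed flat names, and group_and_merge() left-joins the result back onto another frame; a toy example of the naming (made-up values):
toy = pd.DataFrame({'SK_ID_CURR': [1, 1, 2], 'AMT_CREDIT_SUM': [10.0, 30.0, 5.0]})
agg = group(toy, 'BUREAU_', {'AMT_CREDIT_SUM': ['mean', 'max']})
print(list(agg.columns))  # ['SK_ID_CURR', 'BUREAU_AMT_CREDIT_SUM_MEAN', 'BUREAU_AMT_CREDIT_SUM_MAX']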
Home Credit Default Risk
19,576,670
df_train = df_train.drop_duplicates(subset=["text"])<drop_column>
def get_bureau_balance(path, num_rows=None):
    bb = pd.read_csv(os.path.join(path, 'bureau_balance.csv'))
    bb, categorical_cols = one_hot_encoder(bb, nan_as_category=False)
    bb_processed = bb.groupby('SK_ID_BUREAU')[categorical_cols].mean().reset_index()
    agg = {'MONTHS_BALANCE': ['min', 'max', 'mean', 'size']}
    bb_processed = group_and_merge(bb, bb_processed, '', agg, 'SK_ID_BUREAU')
    del bb
    gc.collect()
    return bb_processed
Home Credit Default Risk
19,576,670
df_train.reset_index(drop=True, inplace=True)
df_train<feature_engineering>
bureau, categorical_cols = one_hot_encoder(bureau, nan_as_category=False)
bureau = bureau.merge(get_bureau_balance(DATA_DIRECTORY), how='left', on='SK_ID_BUREAU')
bureau['STATUS_12345'] = 0
for i in range(1, 6):
    bureau['STATUS_12345'] += bureau['STATUS_{}'.format(i)]
Home Credit Default Risk
19,576,670
import spacy  # not imported in any earlier cell shown for this kernel

nlp = spacy.load("en_core_web_lg")
keyword_train = np.array([nlp(text).vector for text in df_train.keyword])
keyword_test = np.array([nlp(text).vector for text in df_test.keyword])<feature_engineering>
features = ['AMT_CREDIT_MAX_OVERDUE', 'AMT_CREDIT_SUM_OVERDUE', 'AMT_CREDIT_SUM',
            'AMT_CREDIT_SUM_DEBT', 'DEBT_PERCENTAGE', 'DEBT_CREDIT_DIFF', 'STATUS_0', 'STATUS_12345']
agg_length = bureau.groupby('MONTHS_BALANCE_SIZE')[features].mean().reset_index()
agg_length.rename({feat: 'LL_' + feat for feat in features}, axis=1, inplace=True)
bureau = bureau.merge(agg_length, how='left', on='MONTHS_BALANCE_SIZE')
del agg_length
gc.collect()
Home Credit Default Risk
19,576,670
def nlp_vectors(text):
    res = []
    doc = nlp(text)
    for token in doc:
        if not token.is_space:
            res.append(token.vector)
    return res

def build_nlp_vectors(df_text):
    spacy_vectors = [nlp_vectors(text) for text in df_text]
    max_length = 0
    for vector in spacy_vectors:
        max_length = max(max_length, len(vector))
    print(f"Maximum Length: {max_length}")
    # zero-pad every document to the same number of tokens (300-d vectors)
    for i in range(len(spacy_vectors)):
        while len(spacy_vectors[i]) < max_length:
            spacy_vectors[i].append([0] * 300)
    spacy_vectors = np.array(spacy_vectors)
    print(f"Shape of spacy vector: {spacy_vectors.shape}")
    return spacy_vectors<feature_engineering>
BUREAU_AGG = {
    'SK_ID_BUREAU': ['nunique'],
    'DAYS_CREDIT': ['min', 'max', 'mean'],
    'DAYS_CREDIT_ENDDATE': ['min', 'max'],
    'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'],
    'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
    'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
    'AMT_CREDIT_SUM_OVERDUE': ['max', 'mean', 'sum'],
    'AMT_ANNUITY': ['mean'],
    'DEBT_CREDIT_DIFF': ['mean', 'sum'],
    'MONTHS_BALANCE_MEAN': ['mean', 'var'],
    'MONTHS_BALANCE_SIZE': ['mean', 'sum'],
    'STATUS_0': ['mean'],
    'STATUS_1': ['mean'],
    'STATUS_12345': ['mean'],
    'STATUS_C': ['mean'],
    'STATUS_X': ['mean'],
    'CREDIT_ACTIVE_Active': ['mean'],
    'CREDIT_ACTIVE_Closed': ['mean'],
    'CREDIT_ACTIVE_Sold': ['mean'],
    'CREDIT_TYPE_Consumer credit': ['mean'],
    'CREDIT_TYPE_Credit card': ['mean'],
    'CREDIT_TYPE_Car loan': ['mean'],
    'CREDIT_TYPE_Mortgage': ['mean'],
    'CREDIT_TYPE_Microloan': ['mean'],
    'LL_AMT_CREDIT_SUM_OVERDUE': ['mean'],
    'LL_DEBT_CREDIT_DIFF': ['mean'],
    'LL_STATUS_12345': ['mean'],
}
BUREAU_ACTIVE_AGG = {
    'DAYS_CREDIT': ['max', 'mean'],
    'DAYS_CREDIT_ENDDATE': ['min', 'max'],
    'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'],
    'AMT_CREDIT_SUM': ['max', 'sum'],
    'AMT_CREDIT_SUM_DEBT': ['mean', 'sum'],
    'AMT_CREDIT_SUM_OVERDUE': ['max', 'mean'],
    'DAYS_CREDIT_UPDATE': ['min', 'mean'],
    'DEBT_PERCENTAGE': ['mean'],
    'DEBT_CREDIT_DIFF': ['mean'],
    'CREDIT_TO_ANNUITY_RATIO': ['mean'],
    'MONTHS_BALANCE_MEAN': ['mean', 'var'],
    'MONTHS_BALANCE_SIZE': ['mean', 'sum'],
}
BUREAU_CLOSED_AGG = {
    'DAYS_CREDIT': ['max', 'var'],
    'DAYS_CREDIT_ENDDATE': ['max'],
    'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'],
    'AMT_CREDIT_SUM_OVERDUE': ['mean'],
    'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
    'AMT_CREDIT_SUM_DEBT': ['max', 'sum'],
    'DAYS_CREDIT_UPDATE': ['max'],
    'ENDDATE_DIF': ['mean'],
    'STATUS_12345': ['mean'],
}
BUREAU_LOAN_TYPE_AGG = {
    'DAYS_CREDIT': ['mean', 'max'],
    'AMT_CREDIT_MAX_OVERDUE': ['mean', 'max'],
    'AMT_CREDIT_SUM': ['mean', 'max'],
    'AMT_CREDIT_SUM_DEBT': ['mean', 'max'],
    'DEBT_PERCENTAGE': ['mean'],
    'DEBT_CREDIT_DIFF': ['mean'],
    'DAYS_CREDIT_ENDDATE': ['max'],
}
BUREAU_TIME_AGG = {
    'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'],
    'AMT_CREDIT_SUM_OVERDUE': ['mean'],
    'AMT_CREDIT_SUM': ['max', 'sum'],
    'AMT_CREDIT_SUM_DEBT': ['mean', 'sum'],
    'DEBT_PERCENTAGE': ['mean'],
    'DEBT_CREDIT_DIFF': ['mean'],
    'STATUS_0': ['mean'],
    'STATUS_12345': ['mean'],
}
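Each dictionary above is a pandas agg spec keyed by source column; once passed to group() in the next cell, every (column, statistic) pair becomes one prefixed feature name:
# e.g. 'DAYS_CREDIT': ['min', 'max', 'mean'] in BUREAU_AGG yields the columns
# BUREAU_DAYS_CREDIT_MIN, BUREAU_DAYS_CREDIT_MAX and BUREAU_DAYS_CREDIT_MEAN
# after group(bureau, 'BUREAU_', BUREAU_AGG) runs below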
Home Credit Default Risk
19,576,670
nlp_train = build_nlp_vectors(df_train.text)<load_pretrained>
agg_bureau = group(bureau, 'BUREAU_', BUREAU_AGG)
active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
agg_bureau = group_and_merge(active, agg_bureau, 'BUREAU_ACTIVE_', BUREAU_ACTIVE_AGG)
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
agg_bureau = group_and_merge(closed, agg_bureau, 'BUREAU_CLOSED_', BUREAU_CLOSED_AGG)
del active, closed
gc.collect()
for credit_type in ['Consumer credit', 'Credit card', 'Mortgage', 'Car loan', 'Microloan']:
    type_df = bureau[bureau['CREDIT_TYPE_' + credit_type] == 1]
    prefix = 'BUREAU_' + credit_type.split(' ')[0].upper() + '_'
    agg_bureau = group_and_merge(type_df, agg_bureau, prefix, BUREAU_LOAN_TYPE_AGG)
    del type_df
    gc.collect()
for time_frame in [6, 12]:
    prefix = "BUREAU_LAST{}M_".format(time_frame)
    time_frame_df = bureau[bureau['DAYS_CREDIT'] >= -30 * time_frame]
    agg_bureau = group_and_merge(time_frame_df, agg_bureau, prefix, BUREAU_TIME_AGG)
    del time_frame_df
    gc.collect()
Home Credit Default Risk