| kernel_id (int64, 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completion (string, lengths 1–182k) | comp_name (string, lengths 5–57) |
|---|---|---|---|
19,576,670 | tokenizer = ppb.DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
bert_model = ppb.DistilBertModel.from_pretrained("distilbert-base-uncased")<define_variables> | sort_bureau = bureau.sort_values(by=['DAYS_CREDIT'])
gr = sort_bureau.groupby('SK_ID_CURR')['AMT_CREDIT_MAX_OVERDUE'].last().reset_index()
gr.rename(columns={'AMT_CREDIT_MAX_OVERDUE': 'BUREAU_LAST_LOAN_MAX_OVERDUE'}, inplace=True)
agg_bureau = agg_bureau.merge(gr, on='SK_ID_CURR', how='left')
agg_bureau['BUREAU_DEBT_OVER_CREDIT'] = \
agg_bureau['BUREAU_AMT_CREDIT_SUM_DEBT_SUM']/agg_bureau['BUREAU_AMT_CREDIT_SUM_SUM']
agg_bureau['BUREAU_ACTIVE_DEBT_OVER_CREDIT'] = \
agg_bureau['BUREAU_ACTIVE_AMT_CREDIT_SUM_DEBT_SUM']/agg_bureau['BUREAU_ACTIVE_AMT_CREDIT_SUM_SUM'] | Home Credit Default Risk |
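The `ppb` alias in the row above is never imported in the cells shown. A minimal sketch of the assumed imports (the alias conventionally refers to the transformers package, formerly pytorch-pretrained-bert):

```python
# Assumed imports for the DistilBERT cells; not part of the original rows.
import numpy as np
import torch
import transformers as ppb  # "ppb" = pytorch-pretrained-bert, now transformers
```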
19,576,670 | def process_data(df_text):
tokens = df_text.apply(lambda text: tokenizer.encode(text,add_special_tokens=True))
max_len = 0
for token in tokens.values:
max_len = max(max_len,len(token))
print(f"Max Length: {max_len}")
padded = np.array([i+[0]*(max_len-len(i)) for i in tokens.values])
attention_mask = np.where(padded !=0, 1,0)
input_ids = torch.tensor(padded)
attention_mask = torch.tensor(attention_mask)
with torch.no_grad():
last_hidden_states = bert_model(input_ids,attention_mask=attention_mask)
X = last_hidden_states[0][:,0,:].numpy()
print(X.shape)
return X
<prepare_x_and_y> | df = pd.merge(df, agg_bureau, on='SK_ID_CURR', how='left')
del agg_bureau, bureau
gc.collect() | Home Credit Default Risk |
19,576,670 | X_train = process_data(df_train.text )<prepare_x_and_y> | prev = pd.read_csv(os.path.join(DATA_DIRECTORY, 'previous_application.csv'))
pay = pd.read_csv(os.path.join(DATA_DIRECTORY, 'installments_payments.csv')) | Home Credit Default Risk |
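`DATA_DIRECTORY` is referenced in the row above but never defined in these cells; a plausible value for the Kaggle environment (an assumption, matching the read paths used elsewhere in these kernels):

```python
# Assumed constant; the dataset rows never show its definition.
DATA_DIRECTORY = '../input/home-credit-default-risk'
```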
19,576,670 | y_train = df_train.target<split> | PREVIOUS_AGG = {
'SK_ID_PREV': ['nunique'],
'AMT_ANNUITY': ['min', 'max', 'mean'],
'AMT_DOWN_PAYMENT': ['max', 'mean'],
'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
'RATE_DOWN_PAYMENT': ['max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['max', 'mean'],
'DAYS_TERMINATION': ['max'],
'CREDIT_TO_ANNUITY_RATIO': ['mean', 'max'],
'APPLICATION_CREDIT_DIFF': ['min', 'max', 'mean'],
'APPLICATION_CREDIT_RATIO': ['min', 'max', 'mean', 'var'],
'DOWN_PAYMENT_TO_CREDIT': ['mean'],
}
PREVIOUS_ACTIVE_AGG = {
'SK_ID_PREV': ['nunique'],
'SIMPLE_INTERESTS': ['mean'],
'AMT_ANNUITY': ['max', 'sum'],
'AMT_APPLICATION': ['max', 'mean'],
'AMT_CREDIT': ['sum'],
'AMT_DOWN_PAYMENT': ['max', 'mean'],
'DAYS_DECISION': ['min', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
'DAYS_LAST_DUE_1ST_VERSION': ['min', 'max', 'mean'],
'AMT_PAYMENT': ['sum'],
'INSTALMENT_PAYMENT_DIFF': ['mean', 'max'],
'REMAINING_DEBT': ['max', 'mean', 'sum'],
'REPAYMENT_RATIO': ['mean'],
}
PREVIOUS_LATE_PAYMENTS_AGG = {
'DAYS_DECISION': ['min', 'max', 'mean'],
'DAYS_LAST_DUE_1ST_VERSION': ['min', 'max', 'mean'],
'APPLICATION_CREDIT_DIFF': ['min'],
'NAME_CONTRACT_TYPE_Consumer loans': ['mean'],
'NAME_CONTRACT_TYPE_Cash loans': ['mean'],
'NAME_CONTRACT_TYPE_Revolving loans': ['mean'],
}
PREVIOUS_LOAN_TYPE_AGG = {
'AMT_CREDIT': ['sum'],
'AMT_ANNUITY': ['mean', 'max'],
'SIMPLE_INTERESTS': ['min', 'mean', 'max', 'var'],
'APPLICATION_CREDIT_DIFF': ['min', 'var'],
'APPLICATION_CREDIT_RATIO': ['min', 'max', 'mean'],
'DAYS_DECISION': ['max'],
'DAYS_LAST_DUE_1ST_VERSION': ['max', 'mean'],
'CNT_PAYMENT': ['mean'],
}
PREVIOUS_TIME_AGG = {
'AMT_CREDIT': ['sum'],
'AMT_ANNUITY': ['mean', 'max'],
'SIMPLE_INTERESTS': ['mean', 'max'],
'DAYS_DECISION': ['min', 'mean'],
'DAYS_LAST_DUE_1ST_VERSION': ['min', 'max', 'mean'],
'APPLICATION_CREDIT_DIFF': ['min'],
'APPLICATION_CREDIT_RATIO': ['min', 'max', 'mean'],
'NAME_CONTRACT_TYPE_Consumer loans': ['mean'],
'NAME_CONTRACT_TYPE_Cash loans': ['mean'],
'NAME_CONTRACT_TYPE_Revolving loans': ['mean'],
}
PREVIOUS_APPROVED_AGG = {
'SK_ID_PREV': ['nunique'],
'AMT_ANNUITY': ['min', 'max', 'mean'],
'AMT_CREDIT': ['min', 'max', 'mean'],
'AMT_DOWN_PAYMENT': ['max'],
'AMT_GOODS_PRICE': ['max'],
'HOUR_APPR_PROCESS_START': ['min', 'max'],
'DAYS_DECISION': ['min', 'mean'],
'CNT_PAYMENT': ['max', 'mean'],
'DAYS_TERMINATION': ['mean'],
'CREDIT_TO_ANNUITY_RATIO': ['mean', 'max'],
'APPLICATION_CREDIT_DIFF': ['max'],
'APPLICATION_CREDIT_RATIO': ['min', 'max', 'mean'],
'DAYS_FIRST_DRAWING': ['max', 'mean'],
'DAYS_FIRST_DUE': ['min', 'mean'],
'DAYS_LAST_DUE_1ST_VERSION': ['min', 'max', 'mean'],
'DAYS_LAST_DUE': ['max', 'mean'],
'DAYS_LAST_DUE_DIFF': ['min', 'max', 'mean'],
'SIMPLE_INTERESTS': ['min', 'max', 'mean'],
}
PREVIOUS_REFUSED_AGG = {
'AMT_APPLICATION': ['max', 'mean'],
'AMT_CREDIT': ['min', 'max'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['max', 'mean'],
'APPLICATION_CREDIT_DIFF': ['min', 'max', 'mean', 'var'],
'APPLICATION_CREDIT_RATIO': ['min', 'mean'],
'NAME_CONTRACT_TYPE_Consumer loans': ['mean'],
'NAME_CONTRACT_TYPE_Cash loans': ['mean'],
'NAME_CONTRACT_TYPE_Revolving loans': ['mean'],
}
| Home Credit Default Risk |
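The aggregation dictionaries above are consumed by `group` and `group_and_merge` helpers that never appear in these rows. A minimal sketch consistent with how they are called and with the flattened `PREFIX_COL_STAT` column names used later (the exact bodies are assumptions):

```python
import pandas as pd

def group(df_to_agg, prefix, aggregations, aggregate_by='SK_ID_CURR'):
    # Aggregate per key and flatten the MultiIndex columns into PREFIX_COL_STAT names.
    agg_df = df_to_agg.groupby(aggregate_by).agg(aggregations)
    agg_df.columns = [prefix + '_'.join(col).upper() for col in agg_df.columns]
    return agg_df.reset_index()

def group_and_merge(df_to_agg, df_to_merge, prefix, aggregations, aggregate_by='SK_ID_CURR'):
    # Aggregate one table and left-join the result onto another.
    agg_df = group(df_to_agg, prefix, aggregations, aggregate_by)
    return df_to_merge.merge(agg_df, how='left', on=aggregate_by)
```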
19,576,670 | X_tr, X_val, nlp_tr, nlp_val, kw_tr, kw_val, y_tr, y_val = train_test_split(X_train,nlp_train, keyword_train, y_train, test_size=0.25, train_size=0.75,shuffle=True )<choose_model_class> | ohe_columns = [
'NAME_CONTRACT_STATUS', 'NAME_CONTRACT_TYPE', 'CHANNEL_TYPE',
'NAME_TYPE_SUITE', 'NAME_YIELD_GROUP', 'PRODUCT_COMBINATION',
'NAME_PRODUCT_TYPE', 'NAME_CLIENT_TYPE']
prev, categorical_cols = one_hot_encoder(prev, ohe_columns, nan_as_category= False ) | Home Credit Default Risk |
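`one_hot_encoder` is also undefined in these cells; a sketch of the common Kaggle helper it appears to be, with the signature inferred from the call above (an assumption):

```python
import pandas as pd

def one_hot_encoder(df, categorical_columns=None, nan_as_category=True):
    # One-hot encode the given columns and return the new dummy column names.
    original_columns = list(df.columns)
    if categorical_columns is None:
        categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
    df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
    new_columns = [col for col in df.columns if col not in original_columns]
    return df, new_columns
```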
19,576,670 | def build_nn() :
model = tf.keras.Sequential()
model.add(layers.Input(shape=(768,)))
model.add(layers.Dense(128,activation='tanh'))
model.add(layers.Dropout(0.6))
model.add(layers.Dense(32,activation='tanh'))
model.add(layers.Dropout(0.6))
model.add(layers.Dense(8,activation='tanh'))
model.add(layers.Dense(1,activation='sigmoid'))
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),  # the model ends in a sigmoid, so it outputs probabilities, not logits
optimizer=tf.keras.optimizers.Adam(1e-4),
metrics=['accuracy'])
return model<choose_model_class> | prev['APPLICATION_CREDIT_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_CREDIT']
prev['APPLICATION_CREDIT_RATIO'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
prev['CREDIT_TO_ANNUITY_RATIO'] = prev['AMT_CREDIT']/prev['AMT_ANNUITY']
prev['DOWN_PAYMENT_TO_CREDIT'] = prev['AMT_DOWN_PAYMENT'] / prev['AMT_CREDIT']
total_payment = prev['AMT_ANNUITY'] * prev['CNT_PAYMENT']
prev['SIMPLE_INTERESTS'] =(total_payment/prev['AMT_CREDIT'] - 1)/prev['CNT_PAYMENT'] | Home Credit Default Risk |
19,576,670 | kfold = KFold(n_splits=4, shuffle=True, random_state=1 )<compute_test_metric> | approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
active_df = approved[approved['DAYS_LAST_DUE'] == 365243]
active_pay = pay[pay['SK_ID_PREV'].isin(active_df['SK_ID_PREV'])]
active_pay_agg = active_pay.groupby('SK_ID_PREV')[['AMT_INSTALMENT', 'AMT_PAYMENT']].sum()
active_pay_agg.reset_index(inplace= True)
active_pay_agg['INSTALMENT_PAYMENT_DIFF'] = active_pay_agg['AMT_INSTALMENT'] - active_pay_agg['AMT_PAYMENT']
active_df = active_df.merge(active_pay_agg, on= 'SK_ID_PREV', how= 'left')
active_df['REMAINING_DEBT'] = active_df['AMT_CREDIT'] - active_df['AMT_PAYMENT']
active_df['REPAYMENT_RATIO'] = active_df['AMT_PAYMENT'] / active_df['AMT_CREDIT']
active_agg_df = group(active_df, 'PREV_ACTIVE_', PREVIOUS_ACTIVE_AGG)
active_agg_df['TOTAL_REPAYMENT_RATIO'] = active_agg_df['PREV_ACTIVE_AMT_PAYMENT_SUM']/\
active_agg_df['PREV_ACTIVE_AMT_CREDIT_SUM']
del active_pay, active_pay_agg, active_df; gc.collect() | Home Credit Default Risk |
19,576,670 | def eval_f1_score(X_val, y_val, model):
pred_val =(model.predict(X_val)>0.5)
f1 = f1_score(y_val,pred_val)
return f1<define_variables> | prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True ) | Home Credit Default Risk |
19,576,670 | EPOCHS = 100
BATCH_SIZE = 64<train_model> | prev['DAYS_LAST_DUE_DIFF'] = prev['DAYS_LAST_DUE_1ST_VERSION'] - prev['DAYS_LAST_DUE']
approved['DAYS_LAST_DUE_DIFF'] = approved['DAYS_LAST_DUE_1ST_VERSION'] - approved['DAYS_LAST_DUE'] | Home Credit Default Risk |
19,576,670 | fold = 0
history_by_fold = []
cv_results = []
for train,val in kfold.split(X_train,y_train):
nn_model = build_nn()
history = nn_model.fit(X_train[train],y_train[train],
validation_data=(X_train[val],y_train[val]),
epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=0)
scores = nn_model.evaluate(X_train[val],y_train[val],verbose=0)
print(f"-- Fold {fold} -- ")
print(f"{nn_model.metrics_names[0]}: {scores[0]}")
print(f"{nn_model.metrics_names[1]}: {scores[1]}")
print(f"F1 Score: {eval_f1_score(X_train[val],y_train[val],nn_model)}")
cv_results.append(scores[1])
history_by_fold.append(history)
fold+=1
print(f"{np.mean(cv_results)} +\- {np.std(cv_results)}")
plot_history(history )<train_model> | categorical_agg = {key: ['mean'] for key in categorical_cols} | Home Credit Default Risk |
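`plot_history` is called in the row above but never defined in these cells; a minimal sketch of such a helper for Keras History objects (layout and names are assumptions, not the author's code):

```python
import matplotlib.pyplot as plt

def plot_history(history):
    # Plot train/validation loss and accuracy curves from a Keras History object.
    fig, axes = plt.subplots(1, 2, figsize=(12, 4))
    for ax, metric in zip(axes, ('loss', 'accuracy')):
        ax.plot(history.history[metric], label=f'train {metric}')
        ax.plot(history.history[f'val_{metric}'], label=f'val {metric}')
        ax.set_xlabel('epoch')
        ax.legend()
    plt.show()
```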
19,576,670 | nn_model = build_nn()
history = nn_model.fit(X_tr,y_tr, validation_data=(X_val,y_val),
epochs=EPOCHS, batch_size=BATCH_SIZE,verbose=0)
scores= nn_model.evaluate(X_val,y_val,verbose=0)
print(f"Accuracy: {scores[1]}")
print(f"F1 Score: {eval_f1_score(X_val,y_val,nn_model)}" )<choose_model_class> | agg_prev = group(prev, 'PREV_', {**PREVIOUS_AGG, **categorical_agg})
agg_prev = agg_prev.merge(active_agg_df, how='left', on='SK_ID_CURR')
del active_agg_df; gc.collect() | Home Credit Default Risk |
19,576,670 | def build_LSTM() :
lstm_model = tf.keras.Sequential()
lstm_model.add(layers.Input(shape=(None,300)))
lstm_model.add(layers.LSTM(16))
lstm_model.add(layers.Dense(8, activation="tanh"))
lstm_model.add(layers.Dense(8, activation="tanh"))
lstm_model.add(layers.Dense(1,activation="sigmoid"))
lstm_model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),  # sigmoid output, so probabilities rather than logits
optimizer=tf.keras.optimizers.Adam(5e-5),
metrics=['accuracy'])
return lstm_model<define_variables> | agg_prev = group_and_merge(approved, agg_prev, 'APPROVED_', PREVIOUS_APPROVED_AGG)
refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
agg_prev = group_and_merge(refused, agg_prev, 'REFUSED_', PREVIOUS_REFUSED_AGG)
del approved, refused; gc.collect() | Home Credit Default Risk |
EPOCHS = 30
BATCH_SIZE = 64<choose_model_class> | for loan_type in ['Consumer loans', 'Cash loans']:
type_df = prev[prev['NAME_CONTRACT_TYPE_{}'.format(loan_type)] == 1]
prefix = 'PREV_' + loan_type.split(" ")[0] + '_'
agg_prev = group_and_merge(type_df, agg_prev, prefix, PREVIOUS_LOAN_TYPE_AGG)
del type_df; gc.collect() | Home Credit Default Risk |
19,576,670 | kfold = KFold(n_splits=4, shuffle=True, random_state=1 )<train_model> | pay['LATE_PAYMENT'] = pay['DAYS_ENTRY_PAYMENT'] - pay['DAYS_INSTALMENT']
pay['LATE_PAYMENT'] = (pay['LATE_PAYMENT'] > 0).astype(int)  # vectorized flag, replacing the per-row lambda
dpd_id = pay[pay['LATE_PAYMENT'] > 0]['SK_ID_PREV'].unique() | Home Credit Default Risk |
19,576,670 | fold = 0
history_by_fold = []
cv_results = []
for train, val in kfold.split(nlp_train,y_train):
lstm_model = build_LSTM()
history = lstm_model.fit(nlp_train[train],y_train[train],
validation_data=(nlp_train[val],y_train[val]),
epochs=EPOCHS,batch_size=BATCH_SIZE,verbose=0)
scores = lstm_model.evaluate(nlp_train[val],y_train[val],verbose=0)
print(f"-- Fold{fold} --")
print(f"{lstm_model.metrics_names[0]}: {scores[0]}")
print(f"{lstm_model.metrics_names[1]}: {scores[1]}")
print(f"F1 Score: {eval_f1_score(nlp_train[val],y_train[val],lstm_model)}")
cv_results.append(scores[1])
history_by_fold.append(history)
fold+=1
print(f"{np.mean(cv_results)} +\- {np.std(cv_results)}" )<train_model> | agg_dpd = group_and_merge(prev[prev['SK_ID_PREV'].isin(dpd_id)], agg_prev,
'PREV_LATE_', PREVIOUS_LATE_PAYMENTS_AGG)
del agg_dpd, dpd_id; gc.collect() | Home Credit Default Risk |
19,576,670 | lstm_model = build_LSTM()
history = lstm_model.fit(nlp_tr,y_tr,validation_data=(nlp_val,y_val), epochs=EPOCHS, batch_size=BATCH_SIZE )<predict_on_test> | df = pd.merge(df, agg_prev, on='SK_ID_CURR', how='left' ) | Home Credit Default Risk |
19,576,670 | valid_predict =(lstm_model.predict(nlp_val)> 0.5)
f1 = f1_score(y_val, valid_predict)
print(f" F1 Score: {f1}" )<compute_test_metric> | train = df[df['TARGET'].notnull() ]
test = df[df['TARGET'].isnull() ]
del df
del agg_prev
gc.collect() | Home Credit Default Risk |
19,576,670 | lr_keywords = LogisticRegression(max_iter=500)
lr_keywords.fit(kw_tr,y_tr)
val_pred = lr_keywords.predict(kw_val)
print(f"Accurcay: {accuracy_score(y_val, val_pred)}")
print(f"F1 score: {f1_score(y_val,val_pred)}" )<predict_on_test> | labels = train['TARGET']
test_labels = test['TARGET']
train = train.drop(columns=['TARGET'])
test = test.drop(columns=['TARGET'] ) | Home Credit Default Risk |
19,576,670 | nn_tr_predict = nn_model.predict(X_tr)
kw_tr_predict = lr_keywords.predict_proba(kw_tr)[:,1]
lstm_tr_predict = lstm_model.predict(nlp_tr)
nn_val_predict = nn_model.predict(X_val)
kw_val_predict = lr_keywords.predict_proba(kw_val)[:,1]
lstm_val_predict = lstm_model.predict(nlp_val)
kw_tr_predict = kw_tr_predict.reshape(( kw_tr_predict.shape[0],1))
kw_val_predict = kw_val_predict.reshape(( kw_val_predict.shape[0],1))
concat_tr = np.concatenate(( nn_tr_predict, kw_tr_predict, lstm_tr_predict), axis=1)
concat_val = np.concatenate(( nn_val_predict, kw_val_predict, lstm_val_predict), axis=1)
<compute_train_metric> | feature = list(train.columns)
train.replace([np.inf, -np.inf], np.nan, inplace=True)
test.replace([np.inf, -np.inf], np.nan, inplace=True)
test_df = test.copy()
train_df = train.copy()
train_df['TARGET'] = labels
test_df['TARGET'] = test_labels | Home Credit Default Risk |
19,576,670 | lr = LogisticRegression()
lr.fit(concat_tr,y_tr)
val_pred = lr.predict(concat_val)
print(f"Accurcay: {accuracy_score(y_val, val_pred)}")
print(f"F1 score: {f1_score(y_val,val_pred)}" )<prepare_x_and_y> | imputer = SimpleImputer(strategy = 'median')
imputer.fit(train)
imputer.fit(test)
train1 = imputer.transform(train)
test1 = imputer.transform(test)
del train
del test
gc.collect() | Home Credit Default Risk |
19,576,670 | X_test = process_data(df_test.text)<feature_engineering> | scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(train1)  # fit on train only to avoid leaking test statistics
train = scaler.transform(train1)
test = scaler.transform(test1)
del train1
del test1
gc.collect() | Home Credit Default Risk |
19,576,670 | nlp_test = build_nlp_vectors(df_test.text )<predict_on_test> | from keras.models import Sequential
from keras.layers import Dense | Home Credit Default Risk |
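`build_nlp_vectors` (used above for `nlp_test`, and implicitly for `nlp_train` earlier) is never defined in these cells. Since `build_LSTM` expects sequences of 300-dimensional vectors, one plausible implementation maps tokens to pretrained 300-d word vectors; the spaCy model name, `max_len`, and zero padding below are all assumptions:

```python
import numpy as np
import spacy

nlp = spacy.load('en_core_web_lg')  # assumed: any 300-d vector model fits the (None, 300) input

def build_nlp_vectors(texts, max_len=50):
    # Encode each text as a zero-padded (max_len, 300) matrix of token vectors.
    out = np.zeros((len(texts), max_len, 300), dtype=np.float32)
    for i, doc in enumerate(nlp.pipe(str(t) for t in texts)):
        for j, token in enumerate(doc):
            if j >= max_len:
                break
            out[i, j] = token.vector
    return out
```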
19,576,670 | df_test["nn_predict"]= nn_model.predict(X_test)
df_test["lstm_predict"]= lstm_model.predict(nlp_test)
df_test["keyword_predict"] = lr_keywords.predict_proba(keyword_test)[:,1]
features = ["nn_predict","keyword_predict","lstm_predict"]
test_features = df_test[features]
predict = lr.predict(test_features )<save_to_csv> | model_2 = Sequential([
Dense(1000, activation='relu', input_shape=(461,)),
Dense(1000, activation='relu'),
Dense(1000, activation='relu'),
Dense(1000, activation='relu'),
Dense(1, activation='sigmoid'),
])
model_2.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
hist_2 = model_2.fit(train, labels,
batch_size=32, epochs=5 ) | Home Credit Default Risk |
19,576,670 | output = pd.DataFrame({"id":df_test.id, "target":predict})
output.to_csv("submission.csv",index=False)
output<set_options> | pred = model_2.predict(test)  # predict_proba is gone from modern Keras; predict on a sigmoid head already returns probabilities | Home Credit Default Risk |
19,576,670 | %matplotlib inline
InteractiveShell.ast_node_interactivity = 'all'
!pip install chart_studio
plotly.offline.init_notebook_mode(connected=True)
cufflinks.go_offline()
cufflinks.set_config_file(world_readable=True, theme='pearl')
warnings.filterwarnings('ignore' )<load_from_csv> | submit = test_df[['SK_ID_CURR']]
submit['TARGET'] = pred
submit.to_csv('NN.csv', index = False ) | Home Credit Default Risk |
17,857,459 | data = pd.read_csv('../input/nlp-getting-started/train.csv' )<string_transform> | %matplotlib inline
warnings.filterwarnings('ignore')
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 200 ) | Home Credit Default Risk |
17,857,459 | def create_corpus(target):
corpus = []
for i in data[data['target']==target]['text'].str.split() :
for x in i:
corpus.append(x)
return corpus<categorify> | app_train = pd.read_csv('../input/home-credit-default-risk/application_train.csv')
app_test = pd.read_csv('../input/home-credit-default-risk/application_test.csv') | Home Credit Default Risk |
17,857,459 | lemmatizer = WordNetLemmatizer()
def preprocess_data(data):
text = re.sub(r'https?://\S+|www\.\S+|http?://\S+',' ',data)
text = re.sub(r"won't", " will not", text)
text = re.sub(r"won't've", " will not have", text)
text = re.sub(r"can't", " can not", text)
text = re.sub(r"don't", " do not", text)
text = re.sub(r"can't've", " can not have", text)
text = re.sub(r"ma'am", " madam", text)
text = re.sub(r"let's", " let us", text)
text = re.sub(r"ain't", " am not", text)
text = re.sub(r"shan't", " shall not", text)
text = re.sub(r"sha
't", " shall not", text)
text = re.sub(r"o'clock", " of the clock", text)
text = re.sub(r"y'all", " you all", text)
text = re.sub(r"n't", " not", text)
text = re.sub(r"n't've", " not have", text)
text = re.sub(r"'re", " are", text)
text = re.sub(r"'s", " is", text)
text = re.sub(r"'d", " would", text)
text = re.sub(r"'d've", " would have", text)
text = re.sub(r"'ll", " will", text)
text = re.sub(r"'ll've", " will have", text)
text = re.sub(r"'t", " not", text)
text = re.sub(r"'ve", " have", text)
text = re.sub(r"'m", " am", text)
text = re.sub(r"'re", " are", text)
text = re.sub(r'<.*?>',' ',text)
text = re.sub(r'[0-9]', '', text)
text = re.sub("["
u"\U0001F600-\U0001F64F"
u"\U0001F300-\U0001F5FF"
u"\U0001F680-\U0001F6FF"
u"\U0001F1E0-\U0001F1FF"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+",' ',text)
text = re.sub('[^a-zA-Z]',' ',text)
text = re.sub(r"\([^() ]*\)", "", text)
text = re.sub('@\S+', '', text)
text = re.sub('[%s]' % re.escape(string.punctuation), ' ', text)  # the source truncates this cell; stripping string.punctuation (requires import string) is an assumed completion
return text<feature_engineering> | print('Training data shape: ', app_train.shape)
app_train.head()
| Home Credit Default Risk |
17,857,459 | common_words = ['via','like','build','get','would','one','two','feel',
'lol','fuck','take','way','may','first','latest','want',
'make','back','see','know','let','look','come','got',
'still','say','think','great','pleas','amp']
def text_cleaning(data):
return ' '.join(i for i in data.split() if i not in common_words)
data["Cleaned_text"] = data["Cleaned_text"].apply(text_cleaning )<features_selection> | app_train['TARGET'].value_counts() | Home Credit Default Risk |
17,857,459 | def top_ngrams(data,n,grams):
count_vec = CountVectorizer(ngram_range=(grams,grams)).fit(data)
bow = count_vec.transform(data)
add_words = bow.sum(axis=0)
word_freq = [(word, add_words[0, idx]) for word, idx in count_vec.vocabulary_.items()]
word_freq = sorted(word_freq, key = lambda x: x[1], reverse=True)
return word_freq[:n]<create_dataframe> | app_train.dtypes.value_counts()
| Home Credit Default Risk |
17,857,459 | common_uni = top_ngrams(data["Cleaned_text"],10,1)
common_bi = top_ngrams(data["Cleaned_text"],10,2)
common_tri = top_ngrams(data["Cleaned_text"],10,3)
common_uni_df = pd.DataFrame(common_uni,columns=['word','freq'])
common_bi_df = pd.DataFrame(common_bi,columns=['word','freq'])
common_tri_df = pd.DataFrame(common_tri,columns=['word','freq'] )<prepare_x_and_y> | np.linspace(20,70,num=11 ) | Home Credit Default Risk |
17,857,459 | X_inp_clean = data['Cleaned_text']
X_inp_original = data['text']
y_inp = data['target']<train_model> | age_data=app_train[['TARGET','DAYS_BIRTH']]
age_data['DAYS_BIRTH']=-age_data['DAYS_BIRTH']
age_data['YEARS_BIRTH']=age_data['DAYS_BIRTH']/365
age_data['YEARS_BINNED']=pd.cut(age_data['YEARS_BIRTH'],bins=np.linspace(20,70,num=11))
age_data.head(10 ) | Home Credit Default Risk |
17,857,459 | word_tokenizer = Tokenizer()
word_tokenizer.fit_on_texts(X_inp_clean.values)
vocab_length = len(word_tokenizer.word_index)+ 1<string_transform> | age_groups = age_data.groupby('YEARS_BINNED' ).mean()
age_groups | Home Credit Default Risk |
17,857,459 | def embed(corpus):
return word_tokenizer.texts_to_sequences(corpus)
longest_train = max(X_inp_clean.values, key=lambda sentence: len(word_tokenize(sentence)))
length_long_sentence = len(word_tokenize(longest_train))
padded_sentences = pad_sequences(embed(X_inp_clean.values),
length_long_sentence, padding='post' )<feature_engineering> | anom = app_train[app_train['DAYS_EMPLOYED'] == 365243]
non_anom = app_train[app_train['DAYS_EMPLOYED'] != 365243]
print('Target mean for non-anomalous data: %0.2f%%' %(100 * non_anom['TARGET'].mean()))
print('Target mean for anomalous data: %0.2f%%' %(100 * anom['TARGET'].mean()))
| Home Credit Default Risk |
17,857,459 | embeddings_dictionary = dict()
embedding_dim = 100
glove_file = open('../input/glove6b100dtxt/glove.6B.100d.txt')
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = np.asarray(records[1:], dtype='float32')
embeddings_dictionary[word] = vector_dimensions
glove_file.close()<feature_engineering> | app_test['DAYS_EMPLOYED_ANOM']=app_test['DAYS_EMPLOYED']==365243
app_test['DAYS_EMPLOYED'].replace({365243:np.nan}, inplace=True)
print('Out of %d records, the test data contains %d anomalous values.'%(len(app_test),app_test['DAYS_EMPLOYED_ANOM'].sum())) | Home Credit Default Risk |
17,857,459 | embedding_matrix = np.zeros(( vocab_length, embedding_dim))
for word, index in word_tokenizer.word_index.items() :
embedding_vector = embeddings_dictionary.get(word)
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector<split> | app_train['DAYS_BIRTH'] = app_train['DAYS_BIRTH'] / -365
app_test['DAYS_BIRTH'] = app_test['DAYS_BIRTH'] / -365
ext=app_train[['TARGET','EXT_SOURCE_1','EXT_SOURCE_2','EXT_SOURCE_3','DAYS_BIRTH']]
extcorr = ext.corr()
extcorr | Home Credit Default Risk |
17,857,459 | X_train, X_val, y_train, y_val = train_test_split(padded_sentences,
y_inp.values,test_size=0.2,random_state=1 )<choose_model_class> | app_train.dtypes.value_counts()
| Home Credit Default Risk |
17,857,459 | def CNN(hp):
model = keras.Sequential()
hp_learning_rate = hp.Choice('learning_rate', values=[3e-2, 3e-3, 3e-4, 3e-5])
model.add(Embedding(vocab_length, 100, weights=[embedding_matrix],
input_length=length_long_sentence,trainable=False))
model.add(Conv1D(filters=hp.Int('conv_1_filter',min_value=21,max_value=200,step=14),
kernel_size=hp.Choice('conv_1_kernel',values=[3,4,5]),
activation='relu'))
model.add(Dropout(0.3))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(units = hp.Int('dense_1',min_value=21,max_value=150,step=14),
activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1,activation='sigmoid'))
model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
loss=keras.losses.BinaryCrossentropy(from_logits=False),  # sigmoid output layer, so not logits
metrics=['accuracy'])
return model<choose_model_class> | object_columns = app_train.dtypes[app_train.dtypes == 'object'].index.tolist()
object_columns | Home Credit Default Risk |
17,857,459 | tuner_CNN = kt.Hyperband(CNN,objective='val_accuracy',
max_epochs=15,factor=5,
directory='my_dir',
project_name='DisasterTweets_kt',
overwrite=True )<train_on_grid> | cond_1 =(app_train['TARGET'] == 1)
cond_0 =(app_train['TARGET'] == 0)
for a in object_columns:
print(a)
print('\nDelinquent cases\n',app_train[cond_1][a].value_counts() /app_train[cond_1].shape[0])
print('\nNon-delinquent cases\n',app_train[cond_0][a].value_counts() /app_train[cond_0].shape[0])
print('----------------------------' ) | Home Credit Default Risk |
17,857,459 | stop_early = EarlyStopping(monitor='val_loss', mode='min',
verbose=1, patience=10)
tuner_CNN.search(X_train, y_train, epochs=15,
validation_data=(X_val,y_val),callbacks=[stop_early])
best_hps_CNN=tuner_CNN.get_best_hyperparameters(num_trials=1)[0]<train_model> | def missing_values_table(df):
miss = df.isnull().sum()
miss_percent = 100 * miss / len(df)
mis_table = pd.concat([miss, miss_percent], axis=1)
mis_val_table = mis_table.rename(
columns = {0 : 'Missing Values', 1 : '% of Total Values'})
mis_val_table = mis_val_table[
mis_val_table.iloc[:,1] != 0].sort_values(
'% of Total Values', ascending=False ).round(1)
print("선택된 데이터프레임은 " + str(df.shape[1])+ "개의 컬럼이 있다.
"
"그중에서 " + str(mis_val_table.shape[0])+
" 개는 결측값이 있는 컬럼이다.")
return mis_val_table | Home Credit Default Risk |
17,857,459 | model_CNN = tuner_CNN.hypermodel.build(best_hps_CNN)
checkpoint = ModelCheckpoint(
'model_CNN.h5',
monitor = 'val_loss',
verbose = 1,
save_best_only = True
)
history_CNN = model_CNN.fit(X_train, y_train,epochs=50,
validation_data=(X_val,y_val),
callbacks=[checkpoint,stop_early] )<choose_model_class> | missing_values = missing_values_table(app_train)
missing_values.head(20 ) | Home Credit Default Risk |
17,857,459 | def MultichannelCNN(hp):
inputs1 = Input(shape=(length_long_sentence,))
embedding1 = Embedding(vocab_length, 100, weights=[embedding_matrix],
input_length=length_long_sentence, trainable=False )(inputs1)
conv1 = Conv1D(filters=hp.Int('conv_1_filter',min_value=21,max_value=150,step=14),
kernel_size=hp.Choice('conv_1_kernel',values=[3,4,5,6,7,8]),
activation='relu' )(embedding1)
drop1 = Dropout(0.3 )(conv1)
pool1 = MaxPooling1D()(drop1)
flat1 = Flatten()(pool1)
inputs2 = Input(shape=(length_long_sentence,))
embedding2 = Embedding(vocab_length, 100, weights=[embedding_matrix],
input_length=length_long_sentence,trainable=False )(inputs2)
conv2 = Conv1D(filters=hp.Int('conv_2_filter',min_value=21,max_value=150,step=14),
kernel_size=hp.Choice('conv_2_kernel',values=[3,4,5,6,7,8]),
activation='relu' )(embedding2)
drop2 = Dropout(0.3 )(conv2)
pool2 = MaxPooling1D()(drop2)
flat2 = Flatten()(pool2)
merged = concatenate([flat1, flat2])
dense1 = Dense(units = hp.Int('dense_1',min_value=21,max_value=120,step=14),
activation='relu' )(merged)
drop4 = Dropout(0.5 )(dense1)
outputs = Dense(1, activation='sigmoid' )(drop4)
model = Model(inputs=[inputs1, inputs2], outputs=outputs)
hp_learning_rate = hp.Choice('learning_rate', values=[3e-2, 3e-3, 3e-4, 3e-5])
model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
loss=keras.losses.BinaryCrossentropy(from_logits=False),  # sigmoid output layer, so not logits
metrics=['accuracy'])
return model<train_on_grid> | apps = pd.concat([app_train,app_test])
print(apps.shape ) | Home Credit Default Risk |
17,857,459 | tuner_MCNN = kt.Hyperband(MultichannelCNN,objective='val_accuracy',
max_epochs=15,factor=5,
directory='my_dir',
project_name='DisasterTweetsMCNN_kt',
overwrite=True)
stop_early = EarlyStopping(monitor='val_loss', mode='min',
verbose=1, patience=10)
tuner_MCNN.search([X_train,X_train], y_train, epochs=15,
validation_data=([X_val,X_val], y_val),
callbacks=[stop_early])
best_hps_MCNN=tuner_MCNN.get_best_hyperparameters(num_trials=1)[0]<train_model> | apps['TARGET'].value_counts(dropna=False ) | Home Credit Default Risk |
17,857,459 | model_MCNN = tuner_MCNN.hypermodel.build(best_hps_MCNN)
checkpoint = ModelCheckpoint(
'model_MCNN.h5',
monitor = 'val_loss',
verbose = 1,
save_best_only = True
)
history_MCNN = model_MCNN.fit([X_train,X_train], y_train,epochs=50,
validation_data=([X_val,X_val], y_val),
callbacks=[checkpoint,stop_early] )<choose_model_class> | object_col = apps.dtypes[apps.dtypes == 'object'].index.tolist()
for column in object_col:
apps[column] = pd.factorize(apps[column])[0] | Home Credit Default Risk |
17,857,459 | def BiLSTM(hp):
model = Sequential()
model.add(Embedding(input_dim=embedding_matrix.shape[0],
output_dim=embedding_matrix.shape[1],
weights = [embedding_matrix],
input_length=length_long_sentence,trainable = False))
model.add(Bidirectional(CuDNNLSTM(units = hp.Int('dense_1',
min_value=21,max_value=120,step=14)
,return_sequences = True)))
model.add(GlobalMaxPool1D())
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(units = hp.Int('dense_2',min_value=21,
max_value=120,step=14),
activation = "relu"))
model.add(Dropout(0.3))
model.add(Dense(units = hp.Int('dense_3',min_value=21,
max_value=100,step=14),
activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(1, activation = 'sigmoid'))
hp_learning_rate = hp.Choice('learning_rate',
values=[3e-2, 3e-3, 3e-4, 3e-5])
model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
loss=keras.losses.BinaryCrossentropy(from_logits=False),  # sigmoid output layer, so not logits
metrics=['accuracy'])
return model<train_model> | apps['CREDIT_INCOME_PERCENT'] = apps['AMT_CREDIT'] / apps['AMT_INCOME_TOTAL']
apps['ANNUITY_INCOME_PERCENT'] = apps['AMT_ANNUITY'] / apps['AMT_INCOME_TOTAL']
apps['CREDIT_TERM'] = apps['AMT_ANNUITY'] / apps['AMT_CREDIT']
apps['GOODS_CREDIT_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_CREDIT']
apps['CREDIT_GOODS_DIFF'] = apps['AMT_CREDIT'] - apps['AMT_GOODS_PRICE']
apps['GOODS_INCOME_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_INCOME_TOTAL'] | Home Credit Default Risk |
17,857,459 | tuner_BiLSTM = kt.Hyperband(BiLSTM,objective='val_accuracy',
max_epochs=15,factor=5,
directory='my_dir',
project_name='DisasterTweetsBiLSTM_kt',
overwrite=True)
stop_early = EarlyStopping(monitor='val_loss', mode='min',
verbose=1, patience=12)
tuner_BiLSTM.search(X_train, y_train, epochs=15,
validation_data=(X_val, y_val),
callbacks=[stop_early])
best_hps_BiLSTM=tuner_BiLSTM.get_best_hyperparameters(num_trials=1)[0]<train_model> | apps['APPS_EXT_SOURCE_MEAN'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
apps['APPS_EXT_SOURCE_STD'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1 ) | Home Credit Default Risk |
17,857,459 | model_BiLSTM = tuner_BiLSTM.hypermodel.build(best_hps_BiLSTM)
checkpoint = ModelCheckpoint(
'model_BiLSTM.h5',
monitor = 'val_loss',
verbose = 1,
save_best_only = True
)
history_BiLSTM = model_BiLSTM.fit(X_train, y_train, epochs=50,
validation_data=(X_val, y_val),
callbacks=[checkpoint,stop_early] )<categorify> | apps['APPS_EXT_SOURCE_STD'] = apps['APPS_EXT_SOURCE_STD'].fillna(apps['APPS_EXT_SOURCE_STD'].mean() ) | Home Credit Default Risk |
17,857,459 | onehot_encoder = OneHotEncoder(sparse=False)
y =(np.asarray(y_inp)).reshape(-1,1)
Y = onehot_encoder.fit_transform(y)
X_train, X_val, y_train, y_val = train_test_split(X_inp_clean,Y,
test_size=0.2, random_state=1 )<load_pretrained> | apps['EMPLOYED_BIRTH_RATIO'] = apps['DAYS_EMPLOYED']/apps['DAYS_BIRTH']
apps['INCOME_EMPLOYED_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_EMPLOYED']
apps['INCOME_BIRTH_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_BIRTH']
apps['CAR_BIRTH_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_BIRTH']
apps['CAR_EMPLOYED_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_EMPLOYED'] | Home Credit Default Risk |
17,857,459 | model_checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)<define_variables> | ccb = pd.read_csv('../input/home-credit-default-risk/credit_card_balance.csv') | Home Credit Default Risk |
17,857,459 | tokenizer("Hello, this one sentence!", "And this sentence goes with it." )<categorify> | app_ccb = ccb.merge(app_train, left_on='SK_ID_CURR', right_on='SK_ID_CURR', how='outer')
app_ccb.shape | Home Credit Default Risk |
17,857,459 | def regular_encode(texts, tokenizer, maxlen=512):
enc_di = tokenizer.batch_encode_plus(
texts,
return_token_type_ids=False,
pad_to_max_length=True,
max_length=maxlen,
add_special_tokens = True,
truncation=True
)
return np.array(enc_di['input_ids'])
X_train_t = regular_encode(list(X_train), tokenizer, maxlen=512)
X_val_t = regular_encode(list(X_val), tokenizer, maxlen=512 )<define_variables> | missing_values = missing_values_table(ccb)
missing_values.head(20 ) | Home Credit Default Risk |
17,857,459 | AUTO = tf.data.experimental.AUTOTUNE
batch_size = 16
train_dataset =(
tf.data.Dataset
.from_tensor_slices(( X_train_t, y_train))
.repeat()
.shuffle(1995)
.batch(batch_size)
.prefetch(AUTO)
)
valid_dataset =(
tf.data.Dataset
.from_tensor_slices(( X_val_t, y_val))
.batch(batch_size)
.cache()
.prefetch(AUTO)
)<choose_model_class> | app_ccb.groupby('SK_ID_CURR' ).count() | Home Credit Default Risk |
17,857,459 | def build_model(transformer, max_len=512):
input_word_ids = Input(shape=(max_len,), dtype=tf.int32,
name="input_word_ids")
sequence_output = transformer(input_word_ids)[0]
cls_token = sequence_output[:, 0, :]
out = Dense(2, activation='softmax' )(cls_token)
model = Model(inputs=input_word_ids, outputs=out)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-5),
loss='categorical_crossentropy', metrics=['accuracy'])
return model<load_pretrained> | app_ccb.groupby('SK_ID_CURR')['SK_ID_CURR'].count() | Home Credit Default Risk |
17,857,459 | transformer_layer = TFAutoModel.from_pretrained(model_checkpoint)
model_DistilBert = build_model(transformer_layer )<train_model> | app_ccb_target = ccb.merge(app_train[['SK_ID_CURR', 'TARGET']], on='SK_ID_CURR', how='left')
app_ccb_target.shape | Home Credit Default Risk |
17,857,459 | n_steps = X_train.shape[0] // batch_size
history_DistilBert = model_DistilBert.fit(train_dataset,
steps_per_epoch=n_steps,
validation_data=valid_dataset,
epochs=3 )<feature_engineering> | num_columns = app_ccb_target.dtypes[app_ccb_target.dtypes != 'object'].index.tolist() | Home Credit Default Risk |
17,857,459 | test = pd.read_csv('../input/nlp-getting-started/test.csv')
test["Cleaned_text"] = test["text"].apply(preprocess_data)
test["Cleaned_text"] = test["Cleaned_text"].apply(text_cleaning)
test_sentences = pad_sequences(embed(test.Cleaned_text.values),
length_long_sentence, padding='post' )<save_to_csv> | num_columns = [column for column in num_columns if column not in ['SK_ID_PREV', 'SK_ID_CURR', 'TARGET']]
num_columns | Home Credit Default Risk |
predsCNN = (model_CNN.predict(test_sentences) > 0.5).astype(int)  # predict_classes was removed from Keras; threshold the sigmoid output instead
predictions_test = pd.DataFrame(predsCNN)
test_id = pd.DataFrame(test["id"])
submissionCNN = pd.concat([test_id,predictions_test],axis=1)
submissionCNN.columns = ["id","target"]
submissionCNN.to_csv("submissionCNN.csv",index=False )<save_to_csv> | print(app_ccb_target.groupby('TARGET' ).agg({'AMT_BALANCE': ['mean', 'median', 'count','sum','max']}))
print(app_ccb_target.groupby('TARGET' ).agg({'AMT_CREDIT_LIMIT_ACTUAL': ['mean', 'median', 'count','sum','max']}))
print(app_ccb_target.groupby('TARGET' ).agg({'AMT_INST_MIN_REGULARITY': ['mean', 'median', 'count','sum','max']}))
print(app_ccb_target.groupby('TARGET' ).agg({'CNT_INSTALMENT_MATURE_CUM': ['mean', 'median', 'count','sum','max']}))
print(app_ccb_target.groupby('TARGET' ).agg({'AMT_INST_MIN_REGULARITY': ['mean', 'median', 'count','sum','max']}))
print(app_ccb_target.groupby('TARGET' ).agg({'AMT_CREDIT_LIMIT_ACTUAL': ['mean', 'median', 'count','sum','max']}))
| Home Credit Default Risk |
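`ccb_amt_agg` is used in the rows below, but the cell that builds it is missing from this extract. A sketch consistent with the `CCB_*` column names that follow, including the later-dropped `CCB_SK_ID_CURR_COUNT` (the aggregation choices are assumptions):

```python
# Assumed construction of ccb_amt_agg from the credit_card_balance table.
ccb_agg_dict = {
    'SK_ID_CURR': ['count'],
    'AMT_BALANCE': ['mean', 'max'],
    'AMT_CREDIT_LIMIT_ACTUAL': ['mean', 'max'],
    'AMT_INST_MIN_REGULARITY': ['mean', 'max'],
    'CNT_INSTALMENT_MATURE_CUM': ['mean', 'max'],
}
ccb_amt_agg = ccb.groupby('SK_ID_CURR').agg(ccb_agg_dict)
# Flatten MultiIndex columns to names like CCB_AMT_BALANCE_MEAN.
ccb_amt_agg.columns = ['CCB_' + '_'.join(col).upper() for col in ccb_amt_agg.columns]
```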
17,857,459 | predsMCNN = model_MCNN.predict([test_sentences,test_sentences])
predsMCNN =(predsMCNN[:,0] > 0.5 ).astype(int)
predictions_test = pd.DataFrame(predsMCNN)
submissionMCNN = pd.concat([test_id,predictions_test],axis=1)
submissionMCNN.columns = ["id","target"]
submissionMCNN.to_csv("submissionMCNN.csv",index=False )<save_to_csv> | ccb_amt_agg=ccb_amt_agg.reset_index()
ccb_amt_agg | Home Credit Default Risk |
17,857,459 | predsBiLSTM = model_BiLSTM.predict(test_sentences)
predsBiLSTM =(predsBiLSTM[:,0] > 0.5 ).astype(int)
predictions_test = pd.DataFrame(predsBiLSTM)
submissionBiLSTM = pd.concat([test_id,predictions_test],axis=1)
submissionBiLSTM.columns = ["id","target"]
submissionBiLSTM.to_csv("submissionBiLSTM.csv",index=False )<save_to_csv> | ccb_amt_agg=ccb_amt_agg.drop(['CCB_SK_ID_CURR_COUNT'],axis=1)
ccb_amt_agg | Home Credit Default Risk |
17,857,459 | X_test = regular_encode(list(test.Cleaned_text), tokenizer, maxlen=512)
test1 =(tf.data.Dataset.from_tensor_slices(X_test ).batch(batch_size))
pred = model_DistilBert.predict(test1,verbose = 0)
pred = np.argmax(pred,axis=-1)
pred = pred.astype('int32')
res=pd.read_csv('../input/nlp-getting-started/sample_submission.csv',index_col=None)
res['target'] = pred
res.to_csv('submissionDistilBert.csv',index=False )<install_modules> | apps = apps.merge(ccb_amt_agg, left_on='SK_ID_CURR', right_on='SK_ID_CURR', how='left')
app_ccb.shape | Home Credit Default Risk |
17,857,459 | !pip install -U lightautoml<import_modules> | object_col = apps.dtypes[apps.dtypes == 'object'].index.tolist()
for column in object_col:
apps[column] = pd.factorize(apps[column])[0] | Home Credit Default Risk |
17,857,459 | from lightautoml.automl.presets.tabular_presets import TabularAutoML, TabularUtilizedAutoML
from lightautoml.dataset.roles import DatetimeRole, CategoryRole
from lightautoml.tasks import Task
from sklearn.metrics import classification_report, roc_auc_score<load_from_csv> | apps_train = apps[~apps['TARGET'].isnull() ]
apps_test = apps[apps['TARGET'].isnull() ]
apps.shape, apps_train.shape, apps_test.shape | Home Credit Default Risk |
17,857,459 | train_df = pd.read_csv('../input/cat-in-the-dat/train.csv')
test_df = pd.read_csv('../input/cat-in-the-dat/test.csv')
submission_df = pd.read_csv('../input/cat-in-the-dat/sample_submission.csv')<define_variables> | ftr_app = apps_train.drop(['SK_ID_CURR', 'TARGET'], axis=1)
target_app = apps_train['TARGET']
train_x, valid_x, train_y, valid_y = train_test_split(ftr_app, target_app, test_size=0.3, random_state=2020)
train_x.shape, valid_x.shape | Home Credit Default Risk |
17,857,459 | N_THREADS = 4
N_FOLDS = 5
RANDOM_STATE = 42
TEST_SIZE = 0.2
TIMEOUT = 1800
TARGET_NAME = 'target'
np.random.seed(RANDOM_STATE )<data_type_conversions> | clf = LGBMClassifier(
n_jobs=-1,
n_estimators=1000,
learning_rate=0.02,
num_leaves=32,
subsample=0.8,
max_depth=12,
silent=-1,
verbose=-1
)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)], eval_metric= 'auc', verbose= 100,
early_stopping_rounds= 100 ) | Home Credit Default Risk |
17,857,459 | def preprocess(df):
df['time'] =(np.datetime64('2018-01-01')+ df['day'].astype(np.dtype('timedelta64[D]')) + df['month'].astype(np.dtype('timedelta64[M]')) ).astype(str)
return df.drop(columns=['id', 'day', 'month'])
train = preprocess(train_df)
test = preprocess(test_df )<drop_column> | preds = clf.predict_proba(apps_test.drop(['SK_ID_CURR', 'TARGET'], axis=1)) [:, 1 ] | Home Credit Default Risk |
17,857,459 | task = Task('binary',)
roles = {'target': TARGET_NAME,
DatetimeRole(base_date=True, seasonality=('m', 'd', 'wd', 'hour'), base_feats=True): 'time',
CategoryRole(ordinal=False): ['bin_0', 'bin_1', 'bin_2', 'bin_3', 'bin_4', 'nom_0', 'nom_1', 'nom_2', 'nom_3', 'nom_4', 'nom_5', 'nom_6', 'nom_7', 'nom_8', 'nom_9',],
CategoryRole(ordinal=True): ['ord_0', 'ord_1', 'ord_2', 'ord_3', 'ord_4', 'ord_5',],
}<choose_model_class> | apps_test['TARGET'] = preds
apps_test[['SK_ID_CURR', 'TARGET']].to_csv('apps_baseline05.csv', index=False ) | Home Credit Default Risk |
17,896,962 | automl = TabularUtilizedAutoML(task = task,
verbose=2,
timeout = TIMEOUT,
general_params = {'nested_cv': False, 'use_algos': [['linear_l2', 'lgb', 'lgb_tuned', 'cb', 'cb_tuned']]},
reader_params = {'cv': N_FOLDS, 'random_state': RANDOM_STATE},
tuning_params = {'max_tuning_iter': 20},
lgb_params = {'default_params': {'num_threads': N_THREADS, }},
cb_params = {'default_params': {'thread_count': N_THREADS, }}
)
oof_pred = automl.fit_predict(train, roles = roles)
<compute_test_metric> | %matplotlib inline
warnings.filterwarnings('ignore')
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 200 ) | Home Credit Default Risk |
17,896,962 | print(roc_auc_score(train[TARGET_NAME].values.ravel() , oof_pred.data.ravel()))<compute_test_metric> | app_train = pd.read_csv('../input/home-credit-default-risk/application_train.csv')
app_test = pd.read_csv('../input/home-credit-default-risk/application_test.csv') | Home Credit Default Risk |
17,896,962 | thres =.5
print(classification_report(train[TARGET_NAME].values.ravel() ,(oof_pred.data.ravel() > thres ).astype(int), digits=6))<predict_on_test> | print(app_train.isnull().sum())
print("결측치 있는 컴럼 개수: ",sum(app_train.isnull().sum() !=0)) | Home Credit Default Risk |
17,896,962 | test_pred = automl.predict(test )<feature_engineering> | app_train['TARGET'].value_counts() | Home Credit Default Risk |
17,896,962 | submission_df['target'] = test_pred.data.ravel()
submission_df.head()<save_to_csv> | columns = ['AMT_INCOME_TOTAL','AMT_CREDIT', 'AMT_ANNUITY', 'AMT_GOODS_PRICE', 'DAYS_BIRTH', 'DAYS_EMPLOYED', 'DAYS_ID_PUBLISH',
'DAYS_REGISTRATION', 'DAYS_LAST_PHONE_CHANGE', 'CNT_FAM_MEMBERS', 'REGION_RATING_CLIENT', 'EXT_SOURCE_1',
'EXT_SOURCE_2', 'EXT_SOURCE_3', 'AMT_REQ_CREDIT_BUREAU_HOUR', 'AMT_REQ_CREDIT_BUREAU_DAY', 'AMT_REQ_CREDIT_BUREAU_WEEK',
'AMT_REQ_CREDIT_BUREAU_MON', 'AMT_REQ_CREDIT_BUREAU_QRT', 'AMT_REQ_CREDIT_BUREAU_YEAR']
show_hist_by_target(app_train, columns ) | Home Credit Default Risk |
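`show_hist_by_target` is called above but never defined in these cells. A minimal sketch (plot type and layout are assumptions): it overlays each column's distribution for `TARGET == 0` versus `TARGET == 1`.

```python
import matplotlib.pyplot as plt
import seaborn as sns

def show_hist_by_target(df, columns):
    # One figure per column, comparing the delinquent and non-delinquent distributions.
    cond_1, cond_0 = df['TARGET'] == 1, df['TARGET'] == 0
    for col in columns:
        fig, ax = plt.subplots(figsize=(8, 3))
        sns.kdeplot(df.loc[cond_0, col], label='TARGET == 0', ax=ax)
        sns.kdeplot(df.loc[cond_1, col], label='TARGET == 1', ax=ax)
        ax.set_title(col)
        ax.legend()
        plt.show()
```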
17,896,962 | submission_df.to_csv('submission.csv', index=False )<import_modules> | cond_1 =(app_train['TARGET'] == 1)
cond_0 =(app_train['TARGET'] == 0)
print('CODE_GENDER\n')
print(app_train['CODE_GENDER'].value_counts() /app_train.shape[0])
print('\nDelinquent cases\n',app_train[cond_1]['CODE_GENDER'].value_counts() /app_train[cond_1].shape[0])
print('\nNon-delinquent cases\n',app_train[cond_0]['CODE_GENDER'].value_counts() /app_train[cond_0].shape[0]) | Home Credit Default Risk |
17,896,962 | import numpy as np
import pandas as pd
import lightgbm as lgb
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_auc_score
from multiprocessing import cpu_count
from tqdm.notebook import tqdm
from cairosvg import svg2png
from PIL import Image
from io import BytesIO
import gc
import os
import sys<define_variables> | app_train['DAYS_EMPLOYED'].value_counts() | Home Credit Default Risk |
17,896,962 | VERSION = 'V1E'
NUM_BOOST_ROUND = 5000
VERBOSE_EVAL = 10
METRICS = ['auc']
N_ROWS = 99271300
def get_index_np() :
return np.arange(N_ROWS )<load_pretrained> | app_train['DAYS_EMPLOYED'] = app_train['DAYS_EMPLOYED'].replace(365243, np.nan)
app_train['DAYS_EMPLOYED'].value_counts(dropna=False ) | Home Credit Default Risk |
17,896,962 | FEATURES = np.load(f'/kaggle/input/riiid-training-and-prediction-using-a-state-data/train_features_{VERSION}.npz', allow_pickle=True )<define_variables> | app_train['CODE_GENDER'].value_counts() | Home Credit Default Risk |
17,896,962 | given_features = [
'prior_question_elapsed_time',
]
deduced_features = [
'mean_user_accuracy',
'answered_correctly_user',
'answered_user',
'mean_content_accuracy',
'part',
'hmean_user_content_accuracy',
'attempt',
]
features = given_features + deduced_features
target = 'answered_correctly'
categorical_feature = ['part', 'tags', 'tags_label', 'prior_question_had_explanation']
categorical_feature_idxs = []
for v in categorical_feature:
try:
categorical_feature_idxs.append(features.index(v))
except:
pass<data_type_conversions> | apps = pd.concat([app_train, app_test])
print(apps.shape ) | Home Credit Default Risk |
17,896,962 | def get_train_val_idxs(TRAIN_SIZE, VAL_SIZE):
train_idxs = []
val_idxs = []
NEW_USER_FRAC = 1/4
np.random.seed(42)
df = pd.DataFrame(index=get_index_np())
for col in ['user_id']:
df[col] = FEATURES[col]
df['index'] = df.index.values.astype(np.uint32)
user_id_index = df.groupby('user_id')['index'].apply(np.array)
for indices in user_id_index.sample(user_id_index.size, random_state=42):
if len(train_idxs)> TRAIN_SIZE:
break
if len(val_idxs)< VAL_SIZE:
if np.random.rand() < NEW_USER_FRAC:
val_idxs += list(indices)
else:
offset = np.random.randint(0, indices.size)
train_idxs += list(indices[:offset])
val_idxs += list(indices[offset:])
else:
train_idxs += list(indices)
return train_idxs, val_idxs
train_idxs, val_idxs = get_train_val_idxs(int(50e6), 2.5e6)
print(f'len train_idxs: {len(train_idxs)}, len validation_idxs: {len(val_idxs)}' )<prepare_x_and_y> | apps['APPS_EXT_SOURCE_MEAN'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
apps['APPS_EXT_SOURCE_STD'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1 ) | Home Credit Default Risk |
17,896,962 | def make_x_y(FEATURES, train_idxs, val_idxs):
X_train = np.ndarray(shape=(len(train_idxs), len(features)) , dtype=np.float32)
X_val = np.ndarray(shape=(len(val_idxs), len(features)) , dtype=np.float32)
for idx, feature in enumerate(tqdm(features)) :
X_train[:,idx] = FEATURES[feature][train_idxs].astype(np.float32)
X_val[:,idx] = FEATURES[feature][val_idxs].astype(np.float32)
y_train = FEATURES[target][train_idxs].astype(np.int8)
y_val = FEATURES[target][val_idxs].astype(np.int8)
return X_train, y_train, X_val, y_val
X_train, y_train, X_val, y_val = make_x_y(FEATURES, train_idxs, val_idxs )<create_dataframe> | apps['APPS_EXT_SOURCE_STD'] = apps['APPS_EXT_SOURCE_STD'].fillna(apps['APPS_EXT_SOURCE_STD'].mean() ) | Home Credit Default Risk |
17,896,962 | pd.DataFrame(X_train[:10], columns=features )<create_dataframe> | apps['APPS_ANNUITY_CREDIT_RATIO'] = apps['AMT_ANNUITY']/apps['AMT_CREDIT']
apps['APPS_GOODS_CREDIT_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_CREDIT']
apps['APPS_CREDIT_GOODS_DIFF'] = apps['AMT_CREDIT'] - apps['AMT_GOODS_PRICE'] | Home Credit Default Risk |
17,896,962 | train_data = lgb.Dataset(
data = X_train,
label = y_train,
categorical_feature = None,
)
val_data = lgb.Dataset(
data = X_val,
label = y_val,
categorical_feature = None,
)<drop_column> | apps['APPS_ANNUITY_INCOME_RATIO'] = apps['AMT_ANNUITY']/apps['AMT_INCOME_TOTAL']
apps['APPS_CREDIT_INCOME_RATIO'] = apps['AMT_CREDIT']/apps['AMT_INCOME_TOTAL']
apps['APPS_GOODS_INCOME_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_INCOME_TOTAL']
apps['APPS_CNT_FAM_INCOME_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['CNT_FAM_MEMBERS']
| Home Credit Default Risk |
17,896,962 | del X_train, y_train, X_val, y_val
gc.collect()<init_hyperparams> | apps['APPS_EMPLOYED_BIRTH_RATIO'] = apps['DAYS_EMPLOYED']/apps['DAYS_BIRTH']
apps['APPS_INCOME_EMPLOYED_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_EMPLOYED']
apps['APPS_INCOME_BIRTH_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_BIRTH']
apps['APPS_CAR_BIRTH_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_BIRTH']
apps['APPS_CAR_EMPLOYED_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_EMPLOYED'] | Home Credit Default Risk |
17,896,962 | lgbm_params = {
'objective': 'binary',
'metric': METRICS,
}<train_model> | object_columns = apps.dtypes[apps.dtypes=='object'].index.tolist()
for column in object_columns:
apps[column] = pd.factorize(apps[column])[0] | Home Credit Default Risk |
17,896,962 | %%time
def train() :
evals_result = {}
model = lgb.train(
params = lgbm_params,
train_set = train_data,
valid_sets = [val_data],
num_boost_round = NUM_BOOST_ROUND,
verbose_eval = VERBOSE_EVAL,
evals_result = evals_result,
early_stopping_rounds = 10,
categorical_feature = categorical_feature_idxs,
feature_name = features,
)
model.save_model(f'model_{VERSION}_{NUM_BOOST_ROUND}.lgb')
return model, evals_result
model, evals_result = train()<save_to_csv> | ftr_app = apps_train.drop(['SK_ID_CURR', 'TARGET'], axis=1)
target_app = apps_train['TARGET']  # was app_train; the features are drawn from apps_train, so take the target from the same frame
train_x, valid_x, train_y, valid_y = train_test_split(ftr_app, target_app, test_size=0.3, random_state=2020)
train_x.shape, valid_x.shape
| Home Credit Default Risk |
17,896,962 | def show_feature_importances(model, importance_type, max_num_features=10**10):
feature_importances = pd.DataFrame()
feature_importances['feature'] = features
feature_importances['value'] = pd.DataFrame(model.feature_importance(importance_type))
feature_importances = feature_importances.sort_values(by='value', ascending=False)
feature_importances.to_csv(f'feature_importances_{importance_type}.csv')
feature_importances = feature_importances[:max_num_features]
plt.figure(figsize=(20, 8))
plt.xlim([0, feature_importances.value.max() *1.1])
plt.title(f'Feature {importance_type}', fontsize=18);
sns.barplot(data=feature_importances, x='value', y='feature', palette='rocket');
for idx, v in enumerate(feature_importances.value):
plt.text(v, idx, " {:.2e}".format(v))
show_feature_importances(model, 'gain')
show_feature_importances(model, 'split' )<drop_column> | clf = LGBMClassifier(
n_jobs=-1,
n_estimators=1000,
learning_rate=0.02,
num_leaves=32,
subsample=0.8,
max_depth=12,
silent=-1,
verbose=-1
) | Home Credit Default Risk |
17,896,962 | del train_data
gc.collect()<create_dataframe> | clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)], eval_metric= 'auc', verbose= 100,
early_stopping_rounds= 100 ) | Home Credit Default Risk |
17,896,962 | def get_features_questions_df() :
features_df = pd.DataFrame(index=get_index_np())
for col in tqdm(['content_id', 'part', 'tags', 'tags_label', 'mean_content_accuracy']):
features_df[col] = FEATURES[col]
features_questions_df = features_df.groupby('content_id')[[
'content_id',
'part',
'tags',
'tags_label',
'mean_content_accuracy',
]].first().reset_index(drop=True ).sort_values('content_id')
return features_questions_df
features_questions_df = get_features_questions_df()
print(f'features_questions_df, rows: {features_questions_df.shape[0]}')
display(features_questions_df.head() )<groupby> | preds = clf.predict_proba(apps_test.drop(['SK_ID_CURR', 'TARGET'], axis=1)) [:, 1 ] | Home Credit Default Risk |
17,896,962 | def get_state() :
features_df = pd.DataFrame(index=get_index_np())
for col in tqdm(['user_id', 'content_id', 'answered_correctly']):
features_df[col] = FEATURES[col]
mean_user_accuracy = features_df.groupby('user_id')['answered_correctly'].mean().values
answered_correctly_user = features_df.groupby('user_id')['answered_correctly'].sum().values
answered_user = features_df.groupby('user_id')['answered_correctly'].count().values
state = dict()
for user_id in features_df['user_id'].unique() :
state[user_id] = {}
total = len(state.keys())
user_content = features_df.groupby('user_id')['content_id'].apply(np.array ).apply(np.sort ).apply(np.unique)
user_attempts = features_df.groupby(['user_id', 'content_id'])['content_id'].count().astype(np.uint8 ).groupby('user_id' ).apply(np.array ).values
user_attempts -= 1
for user_id, content, attempt in tqdm(zip(state.keys() , user_content, user_attempts),total=total):
state[user_id]['user_content_attempts'] = dict(zip(content, attempt))
del user_content, user_attempts
gc.collect()
for idx, user_id in enumerate(state.keys()):
state[user_id]['mean_user_accuracy'] = mean_user_accuracy[idx]
state[user_id]['answered_correctly_user'] = answered_correctly_user[idx]
state[user_id]['answered_user'] = answered_user[idx]
return state
state = get_state()
print('Example of the state for user 2746, attempt counting starts at 0 as the pandas cumcount function is used to create the attempt feature')
display(state[2746] )<feature_engineering> | app_test['TARGET'] = preds
app_test[['SK_ID_CURR', 'TARGET']].to_csv('apps_baseline_02.csv', index=False ) | Home Credit Default Risk |
17,896,962 | def get_user_data(state, test_df):
attempt, mean_user_accuracy, answered_correctly_user, answered_user = [], [], [], []
for idx,(user_id, content_id)in test_df[['user_id', 'content_id']].iterrows() :
if user_id in state:
if content_id in state[user_id]['user_content_attempts']:
state[user_id]['user_content_attempts'][content_id] = min(4, state[user_id]['user_content_attempts'][content_id] + 1)
else:
state[user_id]['user_content_attempts'][content_id] = 0
else:
dict_keys = ['mean_user_accuracy', 'answered_correctly_user', 'answered_user', 'user_content_attempts']
dict_default_vals = [0.680, 0, 0, dict(zip([content_id],[0])) ]
state[user_id] = dict(zip(dict_keys, dict_default_vals))
attempt.append(state[user_id]['user_content_attempts'][content_id])
mean_user_accuracy.append(state[user_id]['mean_user_accuracy'])
answered_correctly_user.append(state[user_id]['answered_correctly_user'])
answered_user.append(state[user_id]['answered_user'])
return attempt, mean_user_accuracy, answered_correctly_user, answered_user<feature_engineering> | def get_apps_processed(apps):
apps['APPS_EXT_SOURCE_MEAN'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
apps['APPS_EXT_SOURCE_STD'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
apps['APPS_EXT_SOURCE_STD'] = apps['APPS_EXT_SOURCE_STD'].fillna(apps['APPS_EXT_SOURCE_STD'].mean())
apps['APPS_ANNUITY_CREDIT_RATIO'] = apps['AMT_ANNUITY']/apps['AMT_CREDIT']
apps['APPS_GOODS_CREDIT_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_CREDIT']
apps['APPS_ANNUITY_INCOME_RATIO'] = apps['AMT_ANNUITY']/apps['AMT_INCOME_TOTAL']
apps['APPS_CREDIT_INCOME_RATIO'] = apps['AMT_CREDIT']/apps['AMT_INCOME_TOTAL']
apps['APPS_GOODS_INCOME_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_INCOME_TOTAL']
apps['APPS_CNT_FAM_INCOME_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['CNT_FAM_MEMBERS']
apps['APPS_EMPLOYED_BIRTH_RATIO'] = apps['DAYS_EMPLOYED']/apps['DAYS_BIRTH']
apps['APPS_INCOME_EMPLOYED_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_EMPLOYED']
apps['APPS_INCOME_BIRTH_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_BIRTH']
apps['APPS_CAR_BIRTH_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_BIRTH']
apps['APPS_CAR_EMPLOYED_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_EMPLOYED']
return apps | Home Credit Default Risk |
17,896,962 | def update_user_data(state, features_questions_df, prev_test_df):
for user_id, content_id, answered_correctly in prev_test_df[['user_id', 'content_id', 'answered_correctly']].values:
state[user_id]['answered_correctly_user'] += answered_correctly
state[user_id]['answered_user'] += 1
state[user_id]['mean_user_accuracy'] = state[user_id]['answered_correctly_user'] / state[user_id]['answered_user']<split> | prev = pd.read_csv('../input/home-credit-default-risk/previous_application.csv')
print(prev.shape, apps.shape ) | Home Credit Default Risk |
17,896,962 | env = riiideducation.make_env()
iter_test = env.iter_test()<feature_engineering> | prev.groupby('SK_ID_CURR')['SK_ID_CURR'].count().mean() | Home Credit Default Risk |
17,896,962 | prev_test_df = None
mean_attempt_acc_factor = FEATURES['mean_attempt_acc_factor']
for idx,(test_df, _)in tqdm(enumerate(iter_test)) :
if prev_test_df is not None:
prev_test_df['answered_correctly'] = eval(test_df['prior_group_answers_correct'].iloc[0])
update_user_data(state, features_questions_df, prev_test_df.loc[prev_test_df['content_type_id'] == 0])
if idx == 1:
display(test_df)
display(prev_test_df)
attempt, mean_user_accuracy, answered_correctly_user, answered_user = get_user_data(state, test_df)
test_df['attempt'] = attempt
test_df['mean_user_accuracy'] = mean_user_accuracy
test_df['answered_correctly_user'] = answered_correctly_user
test_df['answered_user'] = answered_user
test_df = features_questions_df.merge(test_df, how='right', on='content_id')
test_df['prior_question_elapsed_time'].fillna(23916, inplace=True)
test_df['hmean_user_content_accuracy'] = 2 *(
(test_df['mean_user_accuracy'] * test_df['mean_content_accuracy'])/
(test_df['mean_user_accuracy'] + test_df['mean_content_accuracy'])
)
test_df['answered_correctly'] = model.predict(test_df[features])
env.predict(test_df.loc[test_df['content_type_id'] == 0, ['row_id', 'answered_correctly']])
prev_test_df = test_df.copy()<load_from_csv> | app_prev_target = prev.merge(app_train[['SK_ID_CURR', 'TARGET']], on='SK_ID_CURR', how='left')
app_prev_target.shape | Home Credit Default Risk |
17,896,962 | submission = pd.read_csv('./submission.csv' )<set_options> | def get_prev_processed(prev):
prev['PREV_CREDIT_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_CREDIT']
prev['PREV_GOODS_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_GOODS_PRICE']
prev['PREV_CREDIT_APPL_RATIO'] = prev['AMT_CREDIT']/prev['AMT_APPLICATION']
prev['PREV_ANNUITY_APPL_RATIO'] = prev['AMT_ANNUITY']/prev['AMT_APPLICATION']
prev['PREV_GOODS_APPL_RATIO'] = prev['AMT_GOODS_PRICE']/prev['AMT_APPLICATION']
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
prev['PREV_DAYS_LAST_DUE_DIFF'] = prev['DAYS_LAST_DUE_1ST_VERSION'] - prev['DAYS_LAST_DUE']
all_pay = prev['AMT_ANNUITY'] * prev['CNT_PAYMENT']
prev['PREV_INTERESTS_RATE'] =(all_pay/prev['AMT_CREDIT'] - 1)/prev['CNT_PAYMENT']
return prev | Home Credit Default Risk |
17,896,962 | %reload_ext autoreload
%autoreload 2
%matplotlib inline<import_modules> | def get_prev_amt_agg(prev):
agg_dict = {
'SK_ID_CURR':['count'],
'AMT_CREDIT':['mean', 'max', 'sum'],
'AMT_ANNUITY':['mean', 'max', 'sum'],
'AMT_APPLICATION':['mean', 'max', 'sum'],
'AMT_DOWN_PAYMENT':['mean', 'max', 'sum'],
'AMT_GOODS_PRICE':['mean', 'max', 'sum'],
'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
'PREV_CREDIT_DIFF':['mean', 'max', 'sum'],
'PREV_CREDIT_APPL_RATIO':['mean', 'max'],
'PREV_GOODS_DIFF':['mean', 'max', 'sum'],
'PREV_GOODS_APPL_RATIO':['mean', 'max'],
'PREV_DAYS_LAST_DUE_DIFF':['mean', 'max', 'sum'],
'PREV_INTERESTS_RATE':['mean', 'max']
}
prev_group = prev.groupby('SK_ID_CURR')
prev_amt_agg = prev_group.agg(agg_dict)
prev_amt_agg.columns = ["PREV_"+ "_".join(x ).upper() for x in prev_amt_agg.columns.ravel() ]
prev_amt_agg = prev_amt_agg.reset_index()
return prev_amt_agg | Home Credit Default Risk |
17,896,962 | import numpy as np
import pandas as pd
from datetime import datetime
from collections import Counter
import json
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator<define_variables> | def get_prev_refused_appr_agg(prev):
prev_refused_appr_group = prev[prev['NAME_CONTRACT_STATUS'].isin(['Approved', 'Refused'])].groupby([ 'SK_ID_CURR', 'NAME_CONTRACT_STATUS'])
prev_refused_appr_agg = prev_refused_appr_group['SK_ID_CURR'].count().unstack()
prev_refused_appr_agg.columns = ['PREV_APPROVED_COUNT', 'PREV_REFUSED_COUNT' ]
prev_refused_appr_agg = prev_refused_appr_agg.fillna(0)
prev_refused_appr_agg = prev_refused_appr_agg.reset_index()
return prev_refused_appr_agg | Home Credit Default Risk |
17,896,962 | dim = 256
train_files = []
test_files = []
country_file = ''
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
path = os.path.join(dirname, filename)
if 'train' in path:
train_files.append(path)
elif 'test' in path:
test_files.append(path)
elif 'json' in path:
country_file = path<feature_engineering> | def get_prev_agg(prev):
prev = get_prev_processed(prev)
prev_amt_agg = get_prev_amt_agg(prev)
prev_refused_appr_agg = get_prev_refused_appr_agg(prev)
prev_agg = prev_amt_agg.merge(prev_refused_appr_agg, on='SK_ID_CURR', how='left')
prev_agg['PREV_REFUSED_RATIO'] = prev_agg['PREV_REFUSED_COUNT']/prev_agg['PREV_SK_ID_CURR_COUNT']
prev_agg['PREV_APPROVED_RATIO'] = prev_agg['PREV_APPROVED_COUNT']/prev_agg['PREV_SK_ID_CURR_COUNT']
prev_agg = prev_agg.drop(['PREV_REFUSED_COUNT', 'PREV_APPROVED_COUNT'], axis=1)
return prev_agg | Home Credit Default Risk |
17,896,962 | with open(country_file)as json_file:
tmp = json.load(json_file)
country = {}
for key in tmp.keys() :
country[int(key)] = tmp[key].split(',')[-1]<create_dataframe> | def get_apps_all_with_prev_agg(apps, prev):
apps_all = get_apps_processed(apps)
prev_agg = get_prev_agg(prev)
print('prev_agg shape:', prev_agg.shape)
print('apps_all before merge shape:', apps_all.shape)
apps_all = apps_all.merge(prev_agg, on='SK_ID_CURR', how='left')
print('apps_all after merge with prev_agg shape:', apps_all.shape)
return apps_all | Home Credit Default Risk |
17,896,962 | train_set = []
for f in train_files:
idx = int(f.split('/')[6])
train_set.append([f, country[idx]])
train_set = pd.DataFrame(train_set, columns=['Image','Country'])
test_set = []
for f in test_files:
test_set.append(f)
test_set = pd.DataFrame(test_set, columns=['Image'] )<choose_model_class> | def get_apps_all_encoded(apps_all):
object_columns = apps_all.dtypes[apps_all.dtypes == 'object'].index.tolist()
for column in object_columns:
apps_all[column] = pd.factorize(apps_all[column])[0]
return apps_all | Home Credit Default Risk |
17,896,962 | train_data_gen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True )<create_dataframe> | def get_apps_all_train_test(apps_all):
apps_all_train = apps_all[~apps_all['TARGET'].isnull() ]
apps_all_test = apps_all[apps_all['TARGET'].isnull() ]
apps_all_test = apps_all_test.drop('TARGET', axis=1)
return apps_all_train, apps_all_test | Home Credit Default Risk |
17,896,962 | train_generator = train_data_gen.flow_from_dataframe(
dataframe = train_set,
directory="",
x_col="Image",
y_col="Country",
class_mode="categorical",
target_size=(dim,dim),
batch_size=32 )<count_values> | def train_apps_all(apps_all_train):
ftr_app = apps_all_train.drop(['SK_ID_CURR', 'TARGET'], axis=1)
target_app = apps_all_train['TARGET']
train_x, valid_x, train_y, valid_y = train_test_split(ftr_app, target_app, test_size=0.3, random_state=2020)
print('train shape:', train_x.shape, 'valid shape:', valid_x.shape)
clf = LGBMClassifier(
n_jobs=-1,
n_estimators=1000,
learning_rate=0.02,
num_leaves=32,
subsample=0.8,
max_depth=12,
silent=-1,
verbose=-1
)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)], eval_metric= 'auc', verbose= 100,
early_stopping_rounds= 100)
return clf | Home Credit Default Risk |
17,896,962 | num_classes = len(Counter(train_generator.classes ).keys() )<choose_model_class> | def get_dataset() :
app_train = pd.read_csv('../input/home-credit-default-risk/application_train.csv')
app_test = pd.read_csv('../input/home-credit-default-risk/application_test.csv')
apps = pd.concat([app_train, app_test])
prev = pd.read_csv('../input/home-credit-default-risk/previous_application.csv')
return apps, prev | Home Credit Default Risk |
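The helper functions in the last rows compose into an end-to-end baseline. A short driver sketch, assuming the cells are run in order (the driver itself and the output filename are illustrative, not from the original rows):

```python
# Hypothetical assembly of the refactored pipeline defined in the cells above.
apps, prev = get_dataset()
apps_all = get_apps_all_with_prev_agg(apps, prev)   # add PREV_* aggregate features
apps_all = get_apps_all_encoded(apps_all)           # label-encode object columns
apps_all_train, apps_all_test = get_apps_all_train_test(apps_all)
clf = train_apps_all(apps_all_train)                # LightGBM with early stopping
preds = clf.predict_proba(apps_all_test.drop('SK_ID_CURR', axis=1))[:, 1]
apps_all_test['TARGET'] = preds
apps_all_test[['SK_ID_CURR', 'TARGET']].to_csv('prev_baseline.csv', index=False)
```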