kernel_id: int64, 24.2k–23.3M
prompt: stringlengths, 8–1.85M
completion: stringlengths, 1–182k
comp_name: stringlengths, 5–57
1,020,569
train = pd.read_csv('/kaggle/input/seriestemporales-diplomado/train.txt', sep=';', index_col=0) test = pd.read_csv('/kaggle/input/seriestemporales-diplomado/test.txt', sep=';', index_col=0)<rename_columns>
def GP(data): return (GPI(data) + GPII(data) + GPIII(data)) / 3.
Home Credit Default Risk
1,020,569
train = train.set_index('Fecha_Hora')<feature_engineering>
from sklearn.metrics import roc_auc_score
Home Credit Default Risk
1,020,569
test['Fecha_Hora'] = test['Fecha'] + ' ' + test['Hora']<drop_column>
roc_auc_score(train.TARGET,GPI(train))
Home Credit Default Risk
1,020,569
test = test.drop('Fecha', axis=1) test = test.drop('Hora', axis=1) test.head()<drop_column>
roc_auc_score(train.TARGET,GPII(train))
Home Credit Default Risk
1,020,569
test = test[['Fecha_Hora','Poder_Reactivo_Global', 'Voltaje', 'Intensidad_Global', 'Medida_1', 'Medida_2', 'Medida_3']]<drop_column>
roc_auc_score(train.TARGET,GPIII(train))
Home Credit Default Risk
1,020,569
test = test.set_index('Fecha_Hora')<concatenate>
roc_auc_score(train.TARGET,GP(train))
Home Credit Default Risk
1,020,569
<define_variables><EOS>
Submission = pd.DataFrame({'SK_ID_CURR': ID, 'TARGET': GP(test).values}) Submission.to_csv("sample_submission.csv", index=False)
Home Credit Default Risk
1,013,173
<SOS> metric: AUC Kaggle data source: home-credit-default-risk<import_modules>
sns.set(style="whitegrid", color_codes=True) np.random.seed(sum(map(ord, "categorical"))) print(os.listdir("../input")) application_train = pd.read_csv("../input/application_train.csv") application_test = pd.read_csv("../input/application_test.csv") bureau = pd.read_csv("../input/bureau.csv") bureau_balance = pd.read_csv("../input/bureau_balance.csv") credit_card_balance = pd.read_csv("../input/credit_card_balance.csv") installments_payments = pd.read_csv("../input/installments_payments.csv") previous_application = pd.read_csv("../input/previous_application.csv") POS_CASH_balance = pd.read_csv("../input/POS_CASH_balance.csv")
Home Credit Default Risk
1,013,173
import seaborn as sns<count_missing_values>
sns.set(rc={'figure.figsize': (14.7, 8.27)})
Home Credit Default Risk
1,013,173
dataset.isnull().sum()<define_variables>
bureau_cat = [f_ for f_ in bureau.columns if bureau[f_].dtype == 'object'] dummy_bureau = pd.get_dummies(bureau, columns=bureau_cat) dummy_bureau.head()
Home Credit Default Risk
1,013,173
set_test = dataset.iloc[-8760:,:]<define_variables>
bureau_balance_cat = [f_ for f_ in bureau_balance.columns if bureau_balance[f_].dtype == 'object'] dummy_bureau_balance = pd.get_dummies(bureau_balance, columns=bureau_balance_cat) dummy_bureau_balance.head()
Home Credit Default Risk
1,013,173
set_train = dataset.iloc[:-8760,:]<set_options>
avg_bureau_balance = dummy_bureau_balance.groupby('SK_ID_BUREAU').mean() avg_bureau_balance.head()
Home Credit Default Risk
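The completion above shows a recurring pattern for the relational Home Credit tables: one-hot encode a child table's categorical columns with pd.get_dummies, then average per parent key so each dummy column becomes the share of records in that category. A minimal self-contained sketch of the same idea on toy data (values are illustrative, not from the competition files):

import pandas as pd

# Toy child table: several status records per bureau credit.
child = pd.DataFrame({
    'SK_ID_BUREAU': [1, 1, 2, 2, 2],
    'STATUS': ['C', 'X', 'C', '0', '0'],
})

# One-hot encode, then average per parent key: each dummy column
# becomes the fraction of that credit's records with that status.
dummies = pd.get_dummies(child, columns=['STATUS'])
per_credit = dummies.groupby('SK_ID_BUREAU').mean()
print(per_credit)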
1,013,173
set_train<set_options>
bureau_all = dummy_bureau.merge(right=avg_bureau_balance.reset_index(), how='left', on='SK_ID_BUREAU', suffixes=('', '_balance_')) bureau_all.head()
Home Credit Default Risk
1,013,173
set_test<count_missing_values>
bureau_per_curr = bureau_all[['SK_ID_CURR', 'SK_ID_BUREAU']].groupby('SK_ID_CURR').count() bureau_per_curr.head(10) bureau_all['SK_ID_BUREAU'] = bureau_all['SK_ID_CURR'].map(bureau_per_curr['SK_ID_BUREAU'])
Home Credit Default Risk
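The cell above computes a per-client record count with groupby().count() and broadcasts it back onto every row with map(), overwriting SK_ID_BUREAU with that count. A compact sketch of the pattern on toy data:

import pandas as pd

df = pd.DataFrame({'SK_ID_CURR': [1, 1, 2], 'SK_ID_BUREAU': [10, 11, 12]})
# Count child records per client, then broadcast the count to each row.
per_curr = df.groupby('SK_ID_CURR')['SK_ID_BUREAU'].count()
df['SK_ID_BUREAU'] = df['SK_ID_CURR'].map(per_curr)
print(df)  # client 1 carries count 2, client 2 carries count 1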
1,013,173
droping_list_all=[] for j in range(0,7): if not dataset.iloc[:, j].notnull().all(): droping_list_all.append(j) droping_list_all<data_type_conversions>
avg_bureau = bureau_all.groupby('SK_ID_CURR').mean() avg_bureau.head(10)
Home Credit Default Risk
1,013,173
set_train = set_train.apply(pd.to_numeric)<data_type_conversions>
del avg_bureau_balance, dummy_bureau, dummy_bureau_balance, bureau_all
Home Credit Default Risk
1,013,173
set_train.iloc[:,0]=set_train.iloc[:,0].fillna(set_train.iloc[:,0].mean())<data_type_conversions>
credit_card_balance_cat = [f_ for f_ in credit_card_balance.columns if credit_card_balance[f_].dtype == 'object'] dummy_credit_card_balance = pd.get_dummies(credit_card_balance, columns=credit_card_balance_cat) dummy_credit_card_balance.head()
Home Credit Default Risk
1,013,173
set_train.iloc[:,1]=set_train.iloc[:,1].fillna(set_train.iloc[:,1].mean())<count_missing_values>
credit_card_per_curr = dummy_credit_card_balance[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count() dummy_credit_card_balance['SK_ID_PREV'] = dummy_credit_card_balance['SK_ID_CURR'].map(credit_card_per_curr['SK_ID_PREV']) avg_credit_card = dummy_credit_card_balance.groupby('SK_ID_CURR').mean() avg_credit_card.head()
Home Credit Default Risk
1,013,173
set_train.isnull().sum()<drop_column>
del dummy_credit_card_balance
Home Credit Default Risk
1,013,173
set_train = set_train[['Intensidad_Global', 'Medida_1', 'Medida_2', 'Medida_3', 'Poder_Reactivo_Global', 'Voltaje', 'Poder_Activo_Global']] set_train<drop_column>
installments_payments_cat = [f_ for f_ in installments_payments.columns if installments_payments[f_].dtype == 'object'] dummy_installments_payments = pd.get_dummies(installments_payments, columns=installments_payments_cat) dummy_installments_payments.head()
Home Credit Default Risk
1,013,173
set_test = set_test[['Intensidad_Global', 'Medida_1', 'Medida_2', 'Medida_3', 'Poder_Reactivo_Global', 'Voltaje', 'Poder_Activo_Global']] set_test<normalization>
installments_per_curr = dummy_installments_payments[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count() dummy_installments_payments['SK_ID_PREV'] = dummy_installments_payments['SK_ID_CURR'].map(installments_per_curr['SK_ID_PREV']) avg_installments = dummy_installments_payments.groupby('SK_ID_CURR').mean() avg_installments.head()
Home Credit Default Risk
1,013,173
values = set_train.values scaler = MinMaxScaler(feature_range=(0, 1)) scaled = scaler.fit_transform(values)<prepare_x_and_y>
del dummy_installments_payments
Home Credit Default Risk
1,013,173
values = scaled n_train_time_start = 1920000 n_train_time = 2000000 train = values[n_train_time_start:n_train_time, :] test = values[n_train_time:, :] train_X, train_y = train[:, :-1], train[:, -1] test_X, test_y = test[:, :-1], test[:, -1] train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1])) test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1])) print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)<compute_test_metric>
pos_cash_balance_cat = [f_ for f_ in POS_CASH_balance.columns if POS_CASH_balance[f_].dtype == 'object'] dummy_POS_CASH_balance = pd.get_dummies(POS_CASH_balance, columns=pos_cash_balance_cat) dummy_POS_CASH_balance.head()
Home Credit Default Risk
1,013,173
def root_mean_squared_error(y_true, y_pred): return K.sqrt(K.mean(K.square(y_pred - y_true)))<import_modules>
pos_per_curr = dummy_POS_CASH_balance[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count() dummy_POS_CASH_balance['SK_ID_PREV'] = dummy_POS_CASH_balance['SK_ID_CURR'].map(pos_per_curr['SK_ID_PREV']) avg_pos = dummy_POS_CASH_balance.groupby('SK_ID_CURR').mean() avg_pos.head()
Home Credit Default Risk
1,013,173
<compute_train_metric>
del dummy_POS_CASH_balance
Home Credit Default Risk
1,013,173
yhat = model.predict(test_X) test_X = test_X.reshape((test_X.shape[0], 6)) inv_yhat = np.concatenate((yhat, test_X[:, -6:]), axis=1) inv_yhat = scaler.inverse_transform(inv_yhat) inv_yhat = inv_yhat[:,0] test_y = test_y.reshape((len(test_y), 1)) inv_y = np.concatenate((test_y, test_X[:, -6:]), axis=1) inv_y = scaler.inverse_transform(inv_y) inv_y = inv_y[:,0] rmse = np.sqrt(mean_squared_error(inv_y, inv_yhat)) print('Test RMSE: %.3f' % rmse)<data_type_conversions>
previous_application_cat = [f_ for f_ in previous_application.columns if previous_application[f_].dtype == 'object'] dummy_previous_application = pd.get_dummies(previous_application, columns=previous_application_cat) dummy_previous_application.head()
Home Credit Default Risk
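Because the MinMaxScaler in this kernel was fit on all seven columns jointly, a one-column prediction must be padded back to full width before inverse_transform, which is what the concatenation in the cell above does. A reduced sketch of that round trip (three columns instead of seven, and the "prediction" is just the scaled target column):

import numpy as np
from sklearn.preprocessing import MinMaxScaler

rng = np.random.default_rng(0)
data = rng.random((100, 3))              # target in column 0
scaler = MinMaxScaler().fit(data)
scaled = scaler.transform(data)

yhat_scaled = scaled[:, :1]              # stand-in for a model's scaled output
full = np.concatenate([yhat_scaled, scaled[:, 1:]], axis=1)
yhat = scaler.inverse_transform(full)[:, 0]   # back to original units
assert np.allclose(yhat, data[:, 0])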
1,013,173
set_test = set_test.apply(pd.to_numeric)<count_missing_values>
previous_per_curr = dummy_previous_application[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count() dummy_previous_application['SK_ID_PREV'] = dummy_previous_application['SK_ID_CURR'].map(previous_per_curr['SK_ID_PREV']) dummy_previous_application.head(10)
Home Credit Default Risk
1,013,173
set_test.isnull().sum()<predict_on_test>
avg_previous = dummy_previous_application.groupby('SK_ID_CURR').mean() avg_previous.head()
Home Credit Default Risk
1,013,173
set_test_to_predict = set_test[['Intensidad_Global', 'Medida_1', 'Medida_2', 'Medida_3', 'Poder_Reactivo_Global', 'Voltaje']]<set_options>
del dummy_previous_application
Home Credit Default Risk
1,013,173
set_test_to_predict<normalization>
del previous_application
Home Credit Default Risk
1,013,173
values = set_test_to_predict.values scaler_test = MinMaxScaler(feature_range=(0, 1)) scaled_test = scaler_test.fit_transform(values)<train_model>
y = application_train['TARGET'] del application_train['TARGET']
Home Credit Default Risk
1,013,173
to_predict = scaled_test to_predict = to_predict.reshape((to_predict.shape[0], 1, to_predict.shape[1]))<predict_on_test>
cat = [f for f in application_train.columns if application_train[f].dtype == 'object'] for f_ in cat: application_train[f_], indexer = pd.factorize(application_train[f_]) application_test[f_] = indexer.get_indexer(application_test[f_]) for f_ in cat: print('{}: {}'.format(f_, application_train[f_].unique())) application_train.head()
Home Credit Default Risk
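pd.factorize returns integer codes together with the index of unique categories; calling get_indexer on that index applies the same mapping to the test column, with categories unseen in train encoded as -1, which is why the loop above factorizes train and indexes test. A minimal sketch:

import pandas as pd

train_col = pd.Series(['cash', 'revolving', 'cash'])
test_col = pd.Series(['revolving', 'cash', 'leasing'])   # 'leasing' unseen

codes, uniques = pd.factorize(train_col)
print(codes)                           # [0 1 0]
print(uniques.get_indexer(test_col))   # [1 0 -1]: unseen category maps to -1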
1,013,173
ypredict = model.predict(to_predict)<concatenate>
application_train.isnull().sum().sort_values(ascending=False)
Home Credit Default Risk
1,013,173
to_predict = to_predict.reshape((to_predict.shape[0], 6)) inv_ypredict = np.concatenate((to_predict[:, :], ypredict), axis=1)<normalization>
application_train = application_train.merge(right=avg_bureau.reset_index(), how='left', on='SK_ID_CURR') application_test = application_test.merge(right=avg_bureau.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,013,173
inv_ypredict = scaler.inverse_transform(inv_ypredict) ypredict_final = inv_ypredict[:,-1]<create_dataframe>
application_train = application_train.merge(right=avg_previous.reset_index(), how='left', on='SK_ID_CURR') application_test = application_test.merge(right=avg_previous.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,013,173
prediction = pd.DataFrame(ypredict_final)<prepare_output>
application_train = application_train.merge(right=avg_pos.reset_index(), how='left', on='SK_ID_CURR') application_test = application_test.merge(right=avg_pos.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,013,173
set_test['Poder_Activo_Global'] = ypredict_final<set_options>
application_train = application_train.merge(right=avg_installments.reset_index(), how='left', on='SK_ID_CURR') application_test = application_test.merge(right=avg_installments.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,013,173
set_test<define_variables>
application_train = application_train.merge(right=avg_credit_card.reset_index(), how='left', on='SK_ID_CURR') application_test = application_test.merge(right=avg_credit_card.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,013,173
enviar = set_test[['Poder_Activo_Global']]<feature_engineering>
X = application_train x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
Home Credit Default Risk
1,013,173
enviar['Fecha_Hora'] = enviar.index<drop_column>
train_data=lgb.Dataset(x_train,label=y_train) test_data=lgb.Dataset(x_test,label=y_test)
Home Credit Default Risk
1,013,173
enviar = enviar.reset_index(drop=True) enviar = enviar[['Fecha_Hora', 'Poder_Activo_Global']] enviar<save_to_csv>
params = { 'boosting_type': 'gbdt', 'objective': 'binary', 'metric': {'binary_logloss', 'auc'}, 'metric_freq': 1, 'is_training_metric': True, 'max_bin': 255, 'learning_rate': 0.1, 'num_leaves': 63, 'tree_learner': 'serial', 'feature_fraction': 0.8, 'bagging_fraction': 0.8, 'bagging_freq': 5, 'min_data_in_leaf': 50, 'min_sum_hessian_in_leaf': 5, 'is_enable_sparse': True, 'use_two_round_loading': False, 'is_save_binary_file': False, 'num_machines': 1, 'verbose': 0, 'subsample_for_bin': 200000, 'min_child_samples': 20, 'min_child_weight': 0.001, 'min_split_gain': 0.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0, 'reg_lambda': 0.0 }
Home Credit Default Risk
1,013,173
enviar.to_csv('Output.csv', index=False)<load_from_csv>
clf = lgb.train(params, train_data, 2000, valid_sets=test_data, early_stopping_rounds=40, verbose_eval=10)
Home Credit Default Risk
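The lgb.train call above uses the older LightGBM API, where early_stopping_rounds and verbose_eval are keyword arguments; in newer LightGBM versions (4.x) the same behavior is expressed through callbacks. A sketch of the equivalent modern call on synthetic data (dataset and parameter values are illustrative):

import lightgbm as lgb
import numpy as np
from sklearn.model_selection import train_test_split

X = np.random.rand(1000, 10)
y = (X[:, 0] + np.random.rand(1000) > 1.0).astype(int)
x_tr, x_va, y_tr, y_va = train_test_split(X, y, random_state=42)

dtrain = lgb.Dataset(x_tr, label=y_tr)
dvalid = lgb.Dataset(x_va, label=y_va)
params = {'objective': 'binary', 'metric': 'auc', 'learning_rate': 0.1}
clf = lgb.train(
    params, dtrain, num_boost_round=2000, valid_sets=[dvalid],
    # callbacks replace early_stopping_rounds= and verbose_eval=
    callbacks=[lgb.early_stopping(40), lgb.log_evaluation(10)],
)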
1,013,173
df_train_with_no_id = pd.read_csv('../input/train.csv') df_train_with_no_id=df_train_with_no_id.drop(['id'],1) X = np.array(df_train_with_no_id.drop(['diagnosis'],1)) y = np.array(df_train_with_no_id['diagnosis']) X.shape model = linear_model.LogisticRegression() model.fit(X,y) predictions = model.predict(X) model.score(X,y) validation_size = 0.20 seed = 7 X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, y, test_size=validation_size, random_state=seed) name='Logistic Regression' kfold = model_selection.KFold(n_splits=10, random_state=seed) cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy') msg = "%s: %f(%f)" % (name, cv_results.mean(), cv_results.std()) print(msg) predictions = model.predict(X_validation) print(accuracy_score(Y_validation, predictions)) print(classification_report(Y_validation, predictions))<save_to_csv>
y_prediction=clf.predict(application_train)
Home Credit Default Risk
1,013,173
datos_submit = pd.read_csv('../input/dataForSubmission.csv') ids=datos_submit['id'] datos_submit=datos_submit.drop(['id'],1) predictions=model.predict(datos_submit) predictions resultados=DataFrame({'Id': ids, 'Predicted': predictions}) resultados resultados.to_csv('resultados_1.csv', index=False)<categorify>
score = roc_auc_score(y, y_prediction) print("Overall AUC: {:.3f}".format(score))
Home Credit Default Risk
1,013,173
class VowelConsonantDataset(Dataset): def __init__(self, file_path,train=True,transform=None): self.transform = transform self.file_path=file_path self.train=train self.file_names=[file for _,_,files in os.walk(self.file_path) for file in files] self.len = len(self.file_names) if self.train: self.classes_mapping=self.get_classes() def __len__(self): return len(self.file_names) def __getitem__(self, index): file_name=self.file_names[index] image_data=self.pil_loader(self.file_path+"/"+file_name) if self.transform: image_data = self.transform(image_data) if self.train: file_name_splitted=file_name.split("_") Y1 = self.classes_mapping[file_name_splitted[0]] Y2 = self.classes_mapping[file_name_splitted[1]] z1,z2=torch.zeros(10),torch.zeros(10) z1[Y1-10],z2[Y2]=1,1 label=torch.stack([z1,z2]) return image_data, label else: return image_data, file_name def pil_loader(self,path): with open(path, 'rb') as f: img = Image.open(f) return img.convert('RGB') def get_classes(self): classes=[] for name in self.file_names: name_splitted=name.split("_") classes.extend([name_splitted[0],name_splitted[1]]) classes=list(set(classes)) classes_mapping={} for i,cl in enumerate(sorted(classes)): classes_mapping[cl]=i return classes_mapping<set_options>
submit = clf.predict(application_test)
Home Credit Default Risk
1,013,173
train_on_gpu = torch.cuda.is_available()<set_options>
application_test['TARGET'] = submit application_test[['SK_ID_CURR', 'TARGET']].to_csv('submission.csv', index=False, float_format='%.8f')
Home Credit Default Risk
1,483,510
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') device<load_pretrained>
application_train=pd.read_csv(r"../input/application_train.csv") application_test=pd.read_csv(r"../input/application_test.csv") bureau_balance=pd.read_csv(r"../input/bureau_balance.csv") bureau=pd.read_csv(r"../input/bureau.csv") credit_card_balance=pd.read_csv(r"../input/credit_card_balance.csv") POS_cash=pd.read_csv(r"../input/POS_CASH_balance.csv") bureau=pd.read_csv(r"../input/bureau.csv") previous_application=pd.read_csv(r"../input/previous_application.csv") install_payment=pd.read_csv(r"../input/installments_payments.csv")
Home Credit Default Risk
1,483,510
os.mkdir('../Inputs') with zipfile.ZipFile("../input/padhai-hindi-vowel-consonant-classification/train.zip","r") as z: z.extractall("../Inputs/") with zipfile.ZipFile("../input/padhai-hindi-vowel-consonant-classification/test.zip","r") as z: z.extractall("../Inputs/")<set_options>
total_null=application_train.isnull().sum().sort_values(ascending=False) percentage=(application_train.isnull().sum()/application_train.isnull().count()*100).sort_values(ascending=False) missing_train_data=pd.concat([total_null,percentage],axis=1,keys=["Total_null","Percentage"])
Home Credit Default Risk
1,483,510
transform = transforms.Compose([transforms.ColorJitter(), transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()])<split>
total_null=POS_cash.isnull().sum().sort_values(ascending=False) percentage=(POS_cash.isnull().sum()/POS_cash.isnull().count()*100).sort_values(ascending=False) missing_POS_data=pd.concat([total_null,percentage],axis=1,keys=["Total_null","Percentage"])
Home Credit Default Risk
1,483,510
batch_size = 60 full_data=VowelConsonantDataset("../Inputs/train",train=True,transform=transform) train_size = int(0.9 * len(full_data)) test_size = len(full_data) - train_size train_data, validation_data = random_split(full_data, [train_size, test_size]) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True) validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=batch_size, shuffle=True) test_data=VowelConsonantDataset("../Inputs/test",train=False,transform=transform) test_loader = torch.utils.data.DataLoader(test_data, batch_size=60, shuffle=False)<import_modules>
total_null=bureau.isnull().sum().sort_values(ascending=False) percentage=(bureau.isnull().sum()/bureau.isnull().count()*100).sort_values(ascending=False) missing_bureau_data=pd.concat([total_null,percentage],axis=1,keys=["Total_null","Percentage"]) missing_bureau_data.head(15)
Home Credit Default Risk
1,483,510
from torchvision import models<choose_model_class>
total_null=bureau_balance.isnull().sum().sort_values(ascending=False) percentage=(bureau_balance.isnull().sum()/bureau_balance.isnull().count()*100).sort_values(ascending=False) missing_bureau_balance=pd.concat([total_null,percentage],axis=1,keys=["Total_null","Percentage"]) missing_bureau_balance.head(15)
Home Credit Default Risk
1,483,510
class MyModel(nn.Module): def __init__(self, num_classes1, num_classes2): super(MyModel, self).__init__() self.model_snet = models.mobilenet_v2(pretrained=True) final_in_features = self.model_snet.classifier[1].in_features mod_classifier = list(self.model_snet.classifier.children())[:-1] self.model_snet.classifier = nn.Sequential(*mod_classifier) self.fc1 = nn.Linear(final_in_features, num_classes1, bias=True) torch.nn.init.xavier_uniform_(self.fc1.weight) torch.nn.init.zeros_(self.fc1.bias) self.fc2 = nn.Linear(final_in_features, num_classes2, bias=True) torch.nn.init.xavier_uniform_(self.fc2.weight) torch.nn.init.zeros_(self.fc2.bias) def forward(self, x): x = self.model_snet(x) out1 = self.fc1(x) out2 = self.fc2(x) return out1, out2<choose_model_class>
total_null=previous_application.isnull().sum().sort_values(ascending=False) percentage=(previous_application.isnull().sum()/previous_application.isnull().count()*100).sort_values(ascending=False) missing_previous_application=pd.concat([total_null,percentage],axis=1,keys=["Total_null","Percentage"]) missing_previous_application.head(15)
Home Credit Default Risk
1,483,510
net = MyModel(10, 10)<data_type_conversions>
total_null=install_payment.isnull().sum().sort_values(ascending=False) percentage=(install_payment.isnull().sum()/install_payment.isnull().count()*100).sort_values(ascending=False) missing_installment=pd.concat([total_null,percentage],axis=1,keys=["Total_null","Percentage"]) missing_installment.head()
Home Credit Default Risk
1,483,510
net = net.to(device)<categorify>
total_null=application_test.isnull().sum().sort_values(ascending=False) percentage=(application_test.isnull().sum()/application_test.isnull().count()*100).sort_values(ascending=False) missing_app_test=pd.concat([total_null,percentage],axis=1,keys=["Total_null","Percentage"]) missing_app_test.head(15)
Home Credit Default Risk
1,483,510
def evaluation(dataloader): total, correct = 0, 0 for data in dataloader: inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) _, actual_v = torch.max(labels[:,0,:].data, 1) _, actual_c = torch.max(labels[:,1,:].data, 1) outputs_v, outputs_c = net(inputs) _, pred_v = torch.max(outputs_v.data, 1) _, pred_c = torch.max(outputs_c.data, 1) total += labels.size(0) correct_v = (pred_v == actual_v)*1 correct_c = (pred_c == actual_c)*1 correct_v[correct_v == 0] = 2 correct_c[correct_c == 0] = 3 correct += (correct_v == correct_c).sum().item() return 100 * correct / total<choose_model_class>
application_train.select_dtypes("object").nunique()
Home Credit Default Risk
1,483,510
loss_fn = nn.CrossEntropyLoss() plist = [{'params': net.fc1.parameters(), 'lr': 5e-3}, {'params': net.fc2.parameters(), 'lr': 5e-3}] lr=0.01 opt = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, nesterov=True)<init_hyperparams>
lbl=LabelEncoder() lbl_count=0 for i in application_train: if application_train[i].dtype=='object': if len(list(application_train[i].unique())) <= 2: lbl.fit(application_train[i]) application_train[i]=lbl.transform(application_train[i]) application_test[i]=lbl.transform(application_test[i]) lbl_count += 1 print('%d columns were encoded.' % lbl_count)
Home Credit Default Risk
1,483,510
%%time loss_arr = [] loss_epoch_arr = [] max_epochs = 10 min_loss = 1000 best_model = None for epoch in range(max_epochs): for i, data in enumerate(train_loader, 0): inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) labels_v = labels[:,0,:] labels_c = labels[:,1,:] _, actual_v = torch.max(labels_v.data, 1) _, actual_c = torch.max(labels_c.data, 1) opt.zero_grad() outputs_v, outputs_c = net(inputs) loss_v = loss_fn(outputs_v, actual_v) loss_c = loss_fn(outputs_c, actual_c) loss = torch.add(loss_v, loss_c) loss.backward() opt.step() if min_loss > loss.item(): min_loss = loss.item() best_model = copy.deepcopy(net.state_dict()) loss_arr.append(loss.item()) del inputs, labels, outputs_v, outputs_c torch.cuda.empty_cache() loss_epoch_arr.append(loss.item()) print('Epoch: %d/%d, Test acc: %0.2f, Train acc: %0.2f' % (epoch, max_epochs, evaluation(validation_loader), evaluation(train_loader))) net.load_state_dict(best_model) plt.plot(loss_epoch_arr) plt.plot(loss_arr) plt.show()<find_best_params>
application_train.select_dtypes("object").nunique()
Home Credit Default Risk
1,483,510
net.eval() plist=[] fn_list=[] for inputs_test, fn in test_loader: inputs_test=inputs_test.to(device) out1,out2=net.forward(inputs_test) _,pred1=torch.max(out1,1) pred1=pred1.tolist() _,pred2=torch.max(out2,1) pred2=pred2.tolist() for x,y,z in zip(pred1,pred2,fn): p="V"+str(x)+"_"+"C"+str(y) plist.append(p) fn_list.append(z)<prepare_output>
application_train=pd.get_dummies(application_train) application_test=pd.get_dummies(application_test) print("application_train feature shape:", application_train.shape) print("application_test feature shape:", application_test.shape)
Home Credit Default Risk
1,483,510
submission = pd.DataFrame({"ImageId":fn_list, "Class":plist}) submission.head()<save_to_csv>
train_target=application_train['TARGET'] application_train,application_test=application_train.align(application_test,axis=1,join='inner')
Home Credit Default Risk
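DataFrame.align with join='inner' along axis=1 keeps only the columns present in both frames, which is how the cell above reconciles train and test after get_dummies (test can lack dummy columns for categories that appear only in train). A small sketch:

import pandas as pd

train = pd.DataFrame({'a': [1], 'b': [2], 'only_train': [3]})
test = pd.DataFrame({'a': [4], 'b': [5]})

# Drop 'only_train' so both frames share an identical column set.
train, test = train.align(test, join='inner', axis=1)
print(list(train.columns), list(test.columns))   # ['a', 'b'] ['a', 'b']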
1,483,510
submission.to_csv('submission.csv', index=False)<save_to_csv>
application_train['TARGET']=train_target
Home Credit Default Risk
1,483,510
submission.to_csv('submission.csv', index=False)<import_modules>
application_train.isnull().sum()
Home Credit Default Risk
1,483,510
import numpy as np import pandas as pd from sklearn.neighbors import KNeighborsClassifier<load_from_csv>
application_train.isnull().sum()
Home Credit Default Risk
1,483,510
traindf = pd.read_csv("../input/train.csv")<drop_column>
prev_category = pd.get_dummies(previous_application) bureau_category = pd.get_dummies(bureau) pos_category = pd.get_dummies(POS_cash) credit_category= pd.get_dummies(credit_card_balance)
Home Credit Default Risk
1,483,510
traindf.drop(['Gender'], axis=1, inplace=True)<create_dataframe>
application_train=application_train.fillna(0) application_test=application_test.fillna(0)
Home Credit Default Risk
1,483,510
traindata = traindf.values<define_search_space>
application_test['is_test'] = 1 application_test['is_train'] = 0 application_train['is_test'] = 0 application_train['is_train'] = 1 Y = application_train['TARGET'] train_X = application_train.drop(['TARGET'], axis=1) test_id = application_train['SK_ID_CURR'] test_X = application_test data = pd.concat([train_X, test_X], axis=0)
Home Credit Default Risk
1,483,510
descriptores,clases = traindata[:,0:-1],traindata[:,-1]<define_variables>
prev_apps = previous_application[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count() previous_application['SK_ID_PREV'] = previous_application['SK_ID_CURR'].map(prev_apps['SK_ID_PREV']) prev_apps_avg = previous_application.groupby('SK_ID_CURR').mean() prev_apps_avg.columns = ['p_' + col for col in prev_apps_avg.columns] data = data.merge(right=prev_apps_avg.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,483,510
clases = [1 if clase==True else 0 for clase in clases]<choose_model_class>
bureau_avg = bureau.groupby('SK_ID_CURR').mean() bureau_avg['buro_count'] = bureau[['SK_ID_BUREAU','SK_ID_CURR']].groupby('SK_ID_CURR').count()['SK_ID_BUREAU'] bureau_avg.columns = ['b_' + f_ for f_ in bureau_avg.columns] data = data.merge(right=bureau_avg.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,483,510
knn = KNeighborsClassifier(n_neighbors=1)<train_model>
install_pay = install_payment[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count() install_payment['SK_ID_PREV'] = install_payment['SK_ID_CURR'].map(install_pay['SK_ID_PREV']) avg_inst = install_payment.groupby('SK_ID_CURR').mean() avg_inst.columns = ['i_' + f_ for f_ in avg_inst.columns] data = data.merge(right=avg_inst.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,483,510
knn.fit(descriptores, clases)<load_from_csv>
pos_cash = POS_cash[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count() POS_cash['SK_ID_PREV'] = POS_cash['SK_ID_CURR'].map(pos_cash['SK_ID_PREV']) POS_avg = POS_cash.groupby('SK_ID_CURR').mean() data = data.merge(right=POS_avg.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,483,510
testdf = pd.read_csv("../input/test.csv") testdf.drop(['Gender'], axis=1, inplace=True) testdata = testdf.values<predict_on_test>
credit_balns = credit_card_balance[['SK_ID_CURR', 'SK_ID_PREV']].groupby('SK_ID_CURR').count() credit_card_balance['SK_ID_PREV'] = credit_card_balance['SK_ID_CURR'].map(credit_balns['SK_ID_PREV']) avg_credit_bal = credit_card_balance.groupby('SK_ID_CURR').mean() avg_credit_bal.columns = ['credit_bal_' + f_ for f_ in avg_credit_bal.columns] data = data.merge(right=avg_credit_bal.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,483,510
predicciones = knn.predict(testdata)<define_variables>
ignore_features = ['SK_ID_CURR', 'is_train', 'is_test'] relevant_features = [col for col in data.columns if col not in ignore_features] trainX = data[data['is_train'] == 1][relevant_features] testX = data[data['is_test'] == 1][relevant_features]
Home Credit Default Risk
1,483,510
predicciones = [True if prediccion==1 else False for prediccion in predicciones]<create_dataframe>
x_train, x_val, y_train, y_val = train_test_split(trainX, Y, test_size=0.2, random_state=18) lgb_train = lgb.Dataset(data=x_train, label=y_train) lgb_eval = lgb.Dataset(data=x_val, label=y_val)
Home Credit Default Risk
1,483,510
soldf = pd.DataFrame(list(enumerate(predicciones)))<rename_columns>
params = {'task': 'train', 'boosting_type': 'gbdt', 'objective': 'binary', 'metric': 'auc', 'learning_rate': 0.01, 'num_leaves': 48, 'num_iteration': 5000, 'verbose': 0, 'colsample_bytree': .8, 'subsample': .9, 'max_depth': 7, 'reg_alpha': .1, 'reg_lambda': .1, 'min_split_gain': .01, 'min_child_weight': 1} model = lgb.train(params, lgb_train, valid_sets=lgb_eval, early_stopping_rounds=170, verbose_eval=200)
Home Credit Default Risk
1,483,510
soldf.columns = ['Id','Prediction']<save_to_csv>
preds = model.predict(testX) sub = application_test[['SK_ID_CURR']].copy() sub['TARGET'] = preds sub.to_csv('sub.csv', index=False) sub.head(10)
Home Credit Default Risk
1,432,477
soldf.to_csv("submission.csv", sep=',', index=False)<set_options>
M1 = pd.read_csv('../input/diversity/LGBM.798.csv') M2 = pd.read_csv('../input/ingredients/WEIGHT_AVERAGE_RANK2.csv') M3 = pd.read_csv('../input/neural/sub_nn.csv') M4 = pd.read_csv('../input/genetic/pure_submission.csv') M5 = pd.read_csv('../input/diversity/xgb.796.csv')
Home Credit Default Risk
1,432,477
%matplotlib inline mpl.style.use('ggplot') sns.set_style('white')<load_from_csv>
def merge_dataframes(dfs, merge_keys): dfs_merged = reduce(lambda left,right: pd.merge(left, right, on=merge_keys), dfs) return dfs_merged
Home Credit Default Risk
1,432,477
sample = pd.read_csv('/kaggle/input/mlbio1/sample_submission.csv') test = pd.read_csv('/kaggle/input/mlbio1/test.csv') train = pd.read_csv('/kaggle/input/mlbio1/train.csv')<sort_values>
dfs = [M1,M2,M3,M4,M5] merge_keys=['SK_ID_CURR'] df = merge_dataframes(dfs, merge_keys=merge_keys)
Home Credit Default Risk
1,432,477
total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum()/train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) missing_data.head(20)<drop_column>
df.columns = ['SK_ID_CURR','T1','T2','T3','T4','T5'] df.head()
Home Credit Default Risk
1,432,477
train = train.drop('smoking_status', 1) test = test.drop('smoking_status', 1)<feature_engineering>
pred_prob = 0.5 * df['T2'] + 0.5 * df['T1'] pred_prob.head()
Home Credit Default Risk
1,432,477
median_bmi = train['bmi'].median() train['bmi'] = train['bmi'].fillna(median_bmi) test['bmi'] = test['bmi'].fillna(median_bmi)<train_on_grid>
sub = pd.DataFrame() sub['SK_ID_CURR'] = df['SK_ID_CURR'] sub['target']= pred_prob
Home Credit Default Risk
1,432,477
def cross_validation_for_roc_auc(clf, X, y, cv=5): X = np.array(X.copy()) y = np.array(y.copy()) kf = KFold(n_splits=cv) kf.get_n_splits(X) scores = [] for train_index, test_index in kf.split(X): X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] clf.fit(X_train, y_train) prediction_on_this_fold = clf.predict_proba(X_test)[:,1] score = roc_auc_score(y_score=prediction_on_this_fold, y_true=y_test) scores.append(score) return scores<compute_train_metric>
sub.to_csv('ldit.csv', index=False)
Home Credit Default Risk
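The hand-rolled loop above reproduces what sklearn's cross_val_score computes with scoring='roc_auc', up to the fold strategy (cross_val_score defaults to stratified folds for classifiers, while the loop uses plain KFold). A compact equivalent on synthetic data:

import numpy as np
from lightgbm import LGBMClassifier
from sklearn.model_selection import cross_val_score

X = np.random.rand(500, 5)
y = (np.random.rand(500) > 0.7).astype(int)

clf = LGBMClassifier(n_estimators=50, max_depth=3)
# The 'roc_auc' scorer calls predict_proba internally, as the loop does.
scores = cross_val_score(clf, X, y, cv=5, scoring='roc_auc')
print(scores.mean())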
1,432,477
def calc_smooth_mean(train, test, by, on, m): mean = train[on].mean() agg = train.groupby(by)[on].agg(['count', 'mean']) counts = agg['count'] means = agg['mean'] smooth = (counts * means + m * mean) / (counts + m) return train[by].map(smooth), test[by].map(smooth)<compute_train_metric>
B_prob = 0.6 * df['T1'] + 0.2 * df['T3'] + 0.2 * df['T4']
Home Credit Default Risk
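calc_smooth_mean above is smoothed target encoding: each category's mean target is shrunk toward the global mean, with m controlling how many observations a category needs before its own mean dominates. A worked toy example of the formula smooth = (count * mean + m * global_mean) / (count + m):

import pandas as pd

df = pd.DataFrame({'gender': ['m', 'm', 'f'], 'stroke': [1, 0, 1]})
m = 5
global_mean = df['stroke'].mean()                     # 2/3
agg = df.groupby('gender')['stroke'].agg(['count', 'mean'])
smooth = (agg['count'] * agg['mean'] + m * global_mean) / (agg['count'] + m)
print(smooth)   # f: (1*1 + 5*2/3)/6 ≈ 0.722, m: (2*0.5 + 5*2/3)/7 ≈ 0.619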
1,432,477
for const in range(0, 21, 5): lgb = LGBMClassifier(n_estimators=50, max_depth=3) sgd = SGDClassifier(loss='log', penalty='elasticnet') train['gender_target_enc'], test['gender_target_enc'] = \ calc_smooth_mean(train, test, by='gender', on='stroke', m=const) print(const, np.mean(cross_validation_for_roc_auc(lgb, train[['gender_target_enc']], train['stroke'])), np.mean(cross_validation_for_roc_auc(sgd, train[['gender_target_enc']], train['stroke'])))<drop_column>
SUB = pd.DataFrame() SUB['SK_ID_CURR'] = df['SK_ID_CURR'] SUB['TARGET'] = B_prob SUB.to_csv('Blendss.csv', index=False)
Home Credit Default Risk
1,432,477
train = train.drop('gender_target_enc', 1) test = test.drop('gender_target_enc', 1) train = train.drop('gender', 1) test = test.drop('gender', 1)<compute_train_metric>
corr_pred = 0.6 * df['T2'] + 0.05 * df['T3'] + 0.05 * df['T4'] + 0.1 * df['T5'] + 0.2 * df['T1'] corr_pred.head()
Home Credit Default Risk
1,432,477
<feature_engineering><EOS>
SuB = pd.DataFrame() SuB['SK_ID_CURR'] = df['SK_ID_CURR'] SuB['TARGET'] = corr_pred SuB.to_csv('corr_blend.csv', index=False)
Home Credit Default Risk
1,224,566
<SOS> metric: AUC Kaggle data source: home-credit-default-risk<categorify>
deb=time.time() path=".. /input" listfiles={"application_train":("SK_ID_CURR",["SK_ID_CURR"]),"application_test":("SK_ID_CURR",["SK_ID_CURR"]),"bureau":("SK_ID_CURR",["SK_ID_CURR","SK_ID_BUREAU"]), "bureau_balance":("SK_ID_BUREAU",["SK_ID_BUREAU"]),"POS_CASH_balance":("SK_ID_PREV",["SK_ID_CURR","SK_ID_PREV"]),"previous_application":("SK_ID_CURR",["SK_ID_CURR","SK_ID_PREV"]), "installments_payments":("SK_ID_PREV",["SK_ID_CURR","SK_ID_PREV"]),"credit_card_balance":("SK_ID_PREV",["SK_ID_CURR","SK_ID_PREV"])} links=[('bureau_balance','bureau'),('installments_payments','previous_application'),('credit_card_balance','previous_application'),('POS_CASH_balance','previous_application'), ('previous_application','full'),('bureau','full')] ftime=[("Start",time.time() ,0)] def timer(title,showtime=True): global ftime newtime=(title,time.time() ,time.time() -ftime[-1][1]) ftime.append(newtime) if showtime==True: print(newtime[0] + " in " + str(np.round(newtime[2],2))) else: print(newtime[0]) data={x:(pd.read_csv(path +"/"+ x + '.csv')).sort_values(listfiles[x][0],ascending=True)for x in listfiles} listetodel=list(set(list(data["application_train"])[44:91])^set(["FONDKAPREMONT_MODE", "HOUSETYPE_MODE", "WALLSMATERIAL_MODE", "EMERGENCYSTATE_MODE"])) print("liste ok") for i in listfiles: data[i].index=list(range(data[i].shape[0])) data["full"]=pd.concat([data["application_train"],data["application_test"]],axis=0,sort=True) data["full"].index=list(range(data["full"].shape[0])) indtest=pd.DataFrame(data["full"]["SK_ID_CURR"].iloc[data["application_train"].shape[0]:]) train_shape,test_shape=data["application_train"].shape[0],data["application_test"].shape[0] del data["application_train"],data["application_test"] dicovars={} timer("Files read and sorted by key") def agregate(dataf,key,table): global dicovars continuous, discrete,dichot=set_groups(dataf) if len(dichot)> 0 : dichot = dichot + [key] vals=sorted(list(set(dataf[key]))) size=len(vals) nbint=10 pace=size//nbint+nbint indmin=dataf[key].index[0] dicodf={} for k,i in enumerate(list(range(pace,size+pace,pace-1))): valmax=vals[i-pace:i-1][-1] indmax=dataf[key][dataf[key]==valmax].index.tolist() [-1] dicosets={} if len(continuous)> 0 : dicosets["continuous"]=dataf[continuous].loc[indmin:indmax,:].groupby(key ).agg(["sum","max","min"]) if len(dichot)> 0 : dicosets["dichot"]=dataf[dichot].loc[indmin:indmax,:].groupby(key ).agg(["sum"]) dicosets["count"]=pd.DataFrame(dataf.loc[indmin:indmax,:].groupby(key)[key].agg("count")) ind=pd.MultiIndex(levels=[[table],["count"]],labels=[[0],[0]]) dicosets["count"].columns=ind for x in list(dicosets): dicosets[x].columns=[x[0]+"_"+x[1] for x in dicosets[x].columns] dicodf[k]=pd.concat([dicosets[x] for x in list(dicosets)],axis=1) del dicosets indmin=indmax+1 dataf_tsf=pd.concat([dicodf[k] for k in list(dicodf)],axis=0) timer("Agreggation done") return dataf_tsf def binar(series,table): global dicovars setserie=set(series) if len(setserie)==2: var=pd.DataFrame(pd.get_dummies(series ).iloc[:,0]) else: var=pd.get_dummies(series) name=series.name var.columns=[str(x)+ "_"+ name for x in var.columns] dicovars[table+ "_"+str(name)]=var.columns.tolist() return var def set_groups(dataf): num=dataf.columns[(dataf.dtypes!="object" ).tolist() ].tolist() nom=dataf.columns[(dataf.dtypes=="object" ).tolist() ].tolist() continuous,discrete,dichot = [],nom,[] for i in num: if len(set(dataf[i].loc[:1000])) >3: continuous.append(i) else: setvar=set(dataf[i]) if len(setvar)>3: continuous.append(i) elif(setvar==set([0,1])and dataf[i].count() 
==dataf.shape[0]): dichot.append(i) else: discrete.append(i) timer("Groups set") return(continuous, discrete,dichot) def fill_values(dataf,continuous, discrete,dichot,table,fill_cont=True): dicodf={} if len(discrete)>0: dataf[discrete]=dataf[discrete].fillna('missing') timer("Missing values filled for discrete vars") dicodf["discrete"]=pd.concat([binar(dataf[x],table)for x in discrete],axis=1) liste=list(set(dataf)^set(discrete)) if len(liste)>0: dicodf["Others"]=dataf[liste] fusion=pd.concat([dicodf[x] for x in list(dicodf)],axis=1) return fusion def process(dataf,keys,keyforagg,name): liste=list(set(dataf)^set(keys)) dataf=dataf[liste+[keyforagg]] columnswithoutkeys=list(set(dataf)^set([keyforagg])) continuous, discrete,dichot =set_groups(dataf[columnswithoutkeys]) dataf=fill_values(dataf,continuous, discrete,dichot,name,fill_cont=False) liste=dataf.columns.tolist() for i in range(len(liste)) : if liste[i] not in keys: liste[i]= str(liste[i]+"_" +name) dataf.columns=liste dataf=agregate(dataf,keyforagg,name) dataf[keyforagg]=dataf.index.tolist() dataf.index=list(range(dataf.shape[0])) return dataf diconame={"bureau":"B","bureau_balance":"BB","POS_CASH_balance":"PCB","previous_application":"PA","installments_payments":"IP","credit_card_balance":"CCB"} for i in links: timer("Processing " + i[0],showtime=False) data[i[0]]=process(data[i[0]],listfiles[i[0]][1],listfiles[i[0]][0],diconame[i[0]]) gc.collect() timer("Merging " + i[0] + " with " +i[1],showtime=False) print(data[i[0]].shape) data[i[1]]=data[i[1]].merge(right=data[i[0]],how='left',on=listfiles[i[0]][0]) del data[i[0]] timer("Merging completed") print(data[i[1]].shape) gc.collect() columnswithoutkeys=list(set(data["full"])^set(["SK_ID_CURR"])) continuous, discrete,dichot =set_groups(data["full"][columnswithoutkeys]) data["full"]=fill_values(data["full"],continuous, discrete,dichot,"BB",fill_cont=False) gc.collect() gc.collect() print('total preprocessing time : ' + str(time.time() -deb)) data["full"]=data["full"][data["full"].count() [data["full"].count() >0].index.tolist() ] data["full"]["Annuity_income_ratio"]=data["full"]["AMT_ANNUITY"]/data["full"]["AMT_INCOME_TOTAL"] data["full"]['Credit_annuity_ratio'] = data["full"]['AMT_CREDIT'] / data["full"]['AMT_ANNUITY'] data["full"]['NEW_SOURCES_PROD'] = data["full"]['EXT_SOURCE_1'] * data["full"]['EXT_SOURCE_2'] * data["full"]['EXT_SOURCE_3'] data["full"]['NEW_EXT_SOURCES_MEAN'] = data["full"][['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1) data["full"]['NEW_CREDIT_TO_GOODS_RATIO'] = data["full"]['AMT_CREDIT'] / data["full"]['AMT_GOODS_PRICE'] data["full"]['NEW_EMPLOY_TO_BIRTH_RATIO'] = data["full"]['DAYS_EMPLOYED'] / data["full"]['DAYS_BIRTH'] data["full"]['NEW_PHONE_TO_EMPLOY_RATIO'] = data["full"]['DAYS_LAST_PHONE_CHANGE'] / data["full"]['DAYS_EMPLOYED'] data["full"]['NEW_SCORES_STD'] = data["full"][['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1) liste=list(filter(lambda x :x !="TARGET",list(data["full"]))) X_train, X_val, y_train, y_val = train_test_split(( data["full"].iloc[: train_shape,:])[liste], data["full"]["TARGET"].iloc[:train_shape], test_size=0.2, random_state=42) X_test=(data["full"].iloc[train_shape:,:])[liste] del data["full"] gc.collect() listetodel=['2_STATUS_BB_sum_B_min', '3_STATUS_BB_sum_B_max', '3_STATUS_BB_sum_B_min', '4_STATUS_BB_sum_B_max', '4_STATUS_BB_sum_B_min', 'AMT_BALANCE_CCB_sum_PA_sum', 'AMT_CREDIT_SUM_OVERDUE_B_min', 'AMT_DRAWINGS_ATM_CURRENT_CCB_min_PA_max', 'AMT_DRAWINGS_ATM_CURRENT_CCB_min_PA_min', 
'AMT_DRAWINGS_CURRENT_CCB_min_PA_max', 'AMT_DRAWINGS_OTHER_CURRENT_CCB_min_PA_max', 'AMT_DRAWINGS_OTHER_CURRENT_CCB_min_PA_min', 'AMT_DRAWINGS_OTHER_CURRENT_CCB_min_PA_sum', 'AMT_DRAWINGS_OTHER_CURRENT_CCB_sum_PA_max', 'AMT_DRAWINGS_POS_CURRENT_CCB_min_PA_max', 'AMT_DRAWINGS_POS_CURRENT_CCB_min_PA_sum', 'AMT_INST_MIN_REGULARITY_CCB_min_PA_max', 'AMT_INST_MIN_REGULARITY_CCB_sum_PA_sum', 'AMT_RECIVABLE_CCB_min_PA_min', 'AMT_REQ_CREDIT_BUREAU_HOUR', 'AMT_TOTAL_RECEIVABLE_CCB_max_PA_sum', 'AMT_TOTAL_RECEIVABLE_CCB_min_PA_max', 'AMT_TOTAL_RECEIVABLE_CCB_sum_PA_sum', 'APARTMENTS_AVG', 'APARTMENTS_MEDI', 'APARTMENTS_MODE', 'Additional Service_NAME_GOODS_CATEGORY_PA_sum', 'Advertising_ORGANIZATION_TYPE', 'Agriculture_ORGANIZATION_TYPE', 'Amortized debt_NAME_CONTRACT_STATUS_PCB_sum_PA_max', 'Amortized debt_NAME_CONTRACT_STATUS_PCB_sum_PA_min', 'Amortized debt_NAME_CONTRACT_STATUS_PCB_sum_PA_sum', 'Animals_NAME_GOODS_CATEGORY_PA_sum', 'Another type of loan_CREDIT_TYPE_B_sum', 'Approved_NAME_CONTRACT_STATUS_CCB_sum_PA_max', 'Approved_NAME_CONTRACT_STATUS_CCB_sum_PA_min', 'Approved_NAME_CONTRACT_STATUS_CCB_sum_PA_sum', 'Approved_NAME_CONTRACT_STATUS_PCB_sum_PA_max', 'Approved_NAME_CONTRACT_STATUS_PCB_sum_PA_min', 'Approved_NAME_CONTRACT_STATUS_PCB_sum_PA_sum', 'BASEMENTAREA_AVG', 'BASEMENTAREA_MEDI', 'BASEMENTAREA_MODE', 'Bad debt_CREDIT_ACTIVE_B_sum', 'Building a house or an annex_NAME_CASH_LOAN_PURPOSE_PA_sum', 'Business development_NAME_CASH_LOAN_PURPOSE_PA_sum', 'Buying a garage_NAME_CASH_LOAN_PURPOSE_PA_sum', 'Buying a holiday home / land_NAME_CASH_LOAN_PURPOSE_PA_sum', 'Buying a home_NAME_CASH_LOAN_PURPOSE_PA_sum', 'Buying a new car_NAME_CASH_LOAN_PURPOSE_PA_sum', 'Buying a used car_NAME_CASH_LOAN_PURPOSE_PA_sum', 'CLIENT_CODE_REJECT_REASON_PA_sum', 'CNT_CREDIT_PROLONG_B_min', 'CNT_CREDIT_PROLONG_B_sum', 'CNT_DRAWINGS_ATM_CURRENT_CCB_min_PA_max', 'CNT_DRAWINGS_ATM_CURRENT_CCB_min_PA_min', 'CNT_DRAWINGS_ATM_CURRENT_CCB_min_PA_sum', 'CNT_DRAWINGS_CURRENT_CCB_min_PA_max', 'CNT_DRAWINGS_CURRENT_CCB_min_PA_min', 'CNT_DRAWINGS_CURRENT_CCB_min_PA_sum', 'CNT_DRAWINGS_OTHER_CURRENT_CCB_max_PA_max', 'CNT_DRAWINGS_OTHER_CURRENT_CCB_max_PA_min', 'CNT_DRAWINGS_OTHER_CURRENT_CCB_min_PA_max', 'CNT_DRAWINGS_OTHER_CURRENT_CCB_min_PA_min', 'CNT_DRAWINGS_OTHER_CURRENT_CCB_min_PA_sum', 'CNT_DRAWINGS_OTHER_CURRENT_CCB_sum_PA_max', 'CNT_DRAWINGS_OTHER_CURRENT_CCB_sum_PA_min', 'CNT_DRAWINGS_POS_CURRENT_CCB_min_PA_sum', 'CNT_INSTALMENT_MATURE_CUM_CCB_min_PA_max', 'CNT_INSTALMENT_MATURE_CUM_CCB_min_PA_min', 'CNT_INSTALMENT_MATURE_CUM_CCB_min_PA_sum', 'COMMONAREA_AVG', 'COMMONAREA_MEDI', 'COMMONAREA_MODE', 'CREDIT_DAY_OVERDUE_B_min', 'Canceled_NAME_CONTRACT_STATUS_PCB_sum_PA_max', 'Canceled_NAME_CONTRACT_STATUS_PCB_sum_PA_min', 'Canceled_NAME_CONTRACT_STATUS_PCB_sum_PA_sum', 'Car dealer_CHANNEL_TYPE_PA_sum', 'Car repairs_NAME_CASH_LOAN_PURPOSE_PA_sum', 'Cars_NAME_PORTFOLIO_PA_sum', 'Cash loan(non-earmarked)_CREDIT_TYPE_B_sum', 'Cashless from the account of the employer_NAME_PAYMENT_TYPE_PA_sum', 'Cleaning_ORGANIZATION_TYPE', 'Cooking staff_OCCUPATION_TYPE', 'Culture_ORGANIZATION_TYPE', 'DAYS_FIRST_DRAWING_PA_max', 'Demand_NAME_CONTRACT_STATUS_CCB_sum_PA_max', 'Demand_NAME_CONTRACT_STATUS_CCB_sum_PA_min', 'Demand_NAME_CONTRACT_STATUS_CCB_sum_PA_sum', 'Demand_NAME_CONTRACT_STATUS_PCB_sum_PA_min', 'Demand_NAME_CONTRACT_STATUS_PCB_sum_PA_sum', 'Direct Sales_NAME_GOODS_CATEGORY_PA_sum', 'ELEVATORS_AVG', 'ELEVATORS_MEDI', 'ELEVATORS_MODE', 'ENTRANCES_AVG', 'ENTRANCES_MEDI', 'ENTRANCES_MODE', 
'Education_NAME_GOODS_CATEGORY_PA_sum', 'Electricity_ORGANIZATION_TYPE', 'Emergency_ORGANIZATION_TYPE', 'Everyday expenses_NAME_CASH_LOAN_PURPOSE_PA_sum', 'FLAG_CONT_MOBILE', 'FLAG_DOCUMENT_15', 'FLAG_DOCUMENT_2', 'FLAG_DOCUMENT_5', 'FLAG_DOCUMENT_9', 'FLAG_EMP_PHONE', 'FLOORSMAX_AVG', 'FLOORSMAX_MEDI', 'FLOORSMAX_MODE', 'FLOORSMIN_AVG', 'FLOORSMIN_MEDI', 'FLOORSMIN_MODE', 'Fitness_NAME_GOODS_CATEGORY_PA_sum', 'Furniture_NAME_CASH_LOAN_PURPOSE_PA_sum', 'Gardening_NAME_GOODS_CATEGORY_PA_sum', 'Gasification / water supply_NAME_CASH_LOAN_PURPOSE_PA_sum', 'Group of people_NAME_TYPE_SUITE_PA_sum', 'Hobby_NAME_CASH_LOAN_PURPOSE_PA_sum', 'House Construction_NAME_GOODS_CATEGORY_PA_sum', 'Industry: type 10_ORGANIZATION_TYPE', 'Industry: type 11_ORGANIZATION_TYPE', 'Industry: type 13_ORGANIZATION_TYPE', 'Industry: type 2_ORGANIZATION_TYPE', 'Industry: type 6_ORGANIZATION_TYPE', 'Industry: type 7_ORGANIZATION_TYPE', 'Industry: type 8_ORGANIZATION_TYPE', 'Insurance_NAME_GOODS_CATEGORY_PA_sum', 'Insurance_ORGANIZATION_TYPE', 'Interbank credit_CREDIT_TYPE_B_sum', 'Journey_NAME_CASH_LOAN_PURPOSE_PA_sum', 'LANDAREA_AVG', 'LANDAREA_MEDI', 'LANDAREA_MODE', 'LIVE_CITY_NOT_WORK_CITY', 'LIVINGAPARTMENTS_AVG', 'LIVINGAPARTMENTS_MEDI', 'LIVINGAPARTMENTS_MODE', 'LIVINGAREA_AVG', 'LIVINGAREA_MEDI', 'LIVINGAREA_MODE', 'Legal Services_ORGANIZATION_TYPE', 'Loan for business development_CREDIT_TYPE_B_sum', 'Loan for purchase of shares(margin lending)_CREDIT_TYPE_B_sum', 'Loan for the purchase of equipment_CREDIT_TYPE_B_sum', 'Loan for working capital replenishment_CREDIT_TYPE_B_sum', 'MLM partners_NAME_SELLER_INDUSTRY_PA_sum', 'MONTHS_BALANCE_CCB_max_PA_min', 'MONTHS_BALANCE_CCB_sum_PA_sum', 'Maternity leave_NAME_INCOME_TYPE', 'Medical Supplies_NAME_GOODS_CATEGORY_PA_sum', 'Medicine_NAME_GOODS_CATEGORY_PA_sum', 'Medicine_ORGANIZATION_TYPE', 'Mobile operator loan_CREDIT_TYPE_B_sum', 'Mobile_ORGANIZATION_TYPE', 'Money for a third person_NAME_CASH_LOAN_PURPOSE_PA_sum', 'Monolithic_WALLSMATERIAL_MODE', 'NONLIVINGAPARTMENTS_AVG', 'NONLIVINGAPARTMENTS_MEDI', 'NONLIVINGAPARTMENTS_MODE', 'NONLIVINGAREA_AVG', 'NONLIVINGAREA_MEDI', 'NONLIVINGAREA_MODE', 'NUM_INSTALMENT_NUMBER_IP_min_PA_max', 'NUM_INSTALMENT_VERSION_IP_min_PA_min', 'Office Appliances_NAME_GOODS_CATEGORY_PA_sum', 'Other_B_NAME_TYPE_SUITE', 'Other_NAME_GOODS_CATEGORY_PA_sum', 'POS others without interest_PRODUCT_COMBINATION_PA_sum', 'Payments on other loans_NAME_CASH_LOAN_PURPOSE_PA_sum', 'Private service staff_OCCUPATION_TYPE', 'Purchase of electronic equipment_NAME_CASH_LOAN_PURPOSE_PA_sum', 'Real estate loan_CREDIT_TYPE_B_sum', 'Refusal to name the goal_NAME_CASH_LOAN_PURPOSE_PA_sum', 'Refused_NAME_CONTRACT_STATUS_CCB_sum_PA_max', 'Refused_NAME_CONTRACT_STATUS_CCB_sum_PA_min', 'Refused_NAME_CONTRACT_STATUS_CCB_sum_PA_sum', 'Religion_ORGANIZATION_TYPE', 'Returned to the store_NAME_CONTRACT_STATUS_PCB_sum_PA_min', 'SK_DPD_CCB_max_PA_max', 'SK_DPD_CCB_sum_PA_sum', 'SK_DPD_DEF_CCB_max_PA_sum', 'SK_DPD_DEF_PCB_min_PA_max', 'SK_DPD_DEF_PCB_min_PA_sum', 'SK_DPD_PCB_min_PA_max', 'SK_DPD_PCB_min_PA_sum', 'SYSTEM_CODE_REJECT_REASON_PA_sum', 'Sent proposal_NAME_CONTRACT_STATUS_CCB_sum_PA_max', 'Sent proposal_NAME_CONTRACT_STATUS_CCB_sum_PA_min', 'Sent proposal_NAME_CONTRACT_STATUS_CCB_sum_PA_sum', 'Services_ORGANIZATION_TYPE', 'Signed_NAME_CONTRACT_STATUS_CCB_sum_PA_max', 'Signed_NAME_CONTRACT_STATUS_CCB_sum_PA_min', 'Signed_NAME_CONTRACT_STATUS_CCB_sum_PA_sum', 'TOTALAREA_MODE', 'Telecom_ORGANIZATION_TYPE', 'Tourism_NAME_GOODS_CATEGORY_PA_sum', 
'Tourism_NAME_SELLER_INDUSTRY_PA_sum', 'Trade: type 1_ORGANIZATION_TYPE', 'Trade: type 4_ORGANIZATION_TYPE', 'Trade: type 5_ORGANIZATION_TYPE', 'Trade: type 6_ORGANIZATION_TYPE', 'Transport: type 1_ORGANIZATION_TYPE', 'Transport: type 2_ORGANIZATION_TYPE', 'Unemployed_NAME_INCOME_TYPE', 'University_ORGANIZATION_TYPE', 'Unknown type of loan_CREDIT_TYPE_B_sum', 'Vehicles_NAME_GOODS_CATEGORY_PA_sum', 'Weapon_NAME_GOODS_CATEGORY_PA_sum', 'XNA_NAME_CLIENT_TYPE_PA_sum', 'XNA_NAME_CONTRACT_STATUS_PCB_sum_PA_max', 'XNA_NAME_CONTRACT_STATUS_PCB_sum_PA_sum', 'XNA_NAME_CONTRACT_TYPE_PA_sum', 'YEARS_BEGINEXPLUATATION_AVG', 'YEARS_BEGINEXPLUATATION_MEDI', 'YEARS_BEGINEXPLUATATION_MODE', 'YEARS_BUILD_AVG', 'YEARS_BUILD_MEDI', 'YEARS_BUILD_MODE', 'currency 4_CREDIT_CURRENCY_B_sum', 'missing_NAME_TYPE_SUITE', 'missing_PRODUCT_COMBINATION_PA_sum', '4_STATUS_BB_sum_B_sum', 'Industry: type 5_ORGANIZATION_TYPE', 'N_FLAG_LAST_APPL_PER_CONTRACT_PA_sum', 'AMT_RECEIVABLE_PRINCIPAL_CCB_sum_PA_sum', 'Security_ORGANIZATION_TYPE', 'CNT_DRAWINGS_OTHER_CURRENT_CCB_sum_PA_sum', 'Jewelry_NAME_SELLER_INDUSTRY_PA_sum', 'Completed_NAME_CONTRACT_STATUS_CCB_sum_PA_max'] listfin=list(( set(listetodel)^set(X_train)) &set(X_train)) X_train=X_train[listfin] X_val=X_val[listfin] X_test=X_test[listfin] gc.collect() timer("Variables selected") X_train=pd.concat([X_train,X_val],axis=0) y_train=pd.concat([y_train,y_val],axis=0) del X_val,y_val gc.collect() timer("Data ready for modelling") vect=np.copy(y_train) vect[vect==1]=4 vect[vect==0]=1 clf = LGBMClassifier(learning_rate =0.075, num_boost_round=1500, nthread=8, seed=27,colsample_bytree=1, max_depth=3, min_child_weight=87.5467,min_split_gain=0.0950,num_leaves=22,reg_alpha=0.0019,reg_lambda=0.0406,subsample=0.8709) clf.fit(X_train, y_train, eval_metric= 'auc', verbose= 100) timer("Model created.")
Home Credit Default Risk
1,224,566
<define_variables><EOS>
sub=pd.Series(clf.predict_proba(X_test)[:,1], name="TARGET") sub.loc[sub<0]=0 sub.loc[sub>1]=1 sub.index=indtest.index submission=pd.concat([indtest,sub],axis=1) submission.to_csv('submission.csv', index=False)
Home Credit Default Risk
1,396,051
<choose_model_class><EOS>
warnings.simplefilter(action='ignore', category=FutureWarning) @contextmanager def timer(title): t0 = time.time() yield print("{} - done in {:.0f}s".format(title, time.time() - t0)) def one_hot_encoder(df, nan_as_category = True): original_columns = list(df.columns) categorical_columns = [col for col in df.columns if df[col].dtype == 'object'] df = pd.get_dummies(df, columns= categorical_columns, dummy_na= nan_as_category) new_columns = [c for c in df.columns if c not in original_columns] return df, new_columns def application_train_test(num_rows = None, nan_as_category = False): df = pd.read_csv('.. /input/application_train.csv', nrows= num_rows) test_df = pd.read_csv('.. /input/application_test.csv', nrows= num_rows) print("Train samples: {}, test samples: {}".format(len(df), len(test_df))) df = df.append(test_df ).reset_index() df = df[df['CODE_GENDER'] != 'XNA'] docs = [_f for _f in df.columns if 'FLAG_DOC' in _f] live = [_f for _f in df.columns if('FLAG_' in _f)&('FLAG_DOC' not in _f)&('_FLAG_' not in _f)] df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True) inc_by_org = df[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE' ).median() ['AMT_INCOME_TOTAL'] df['NEW_CREDIT_TO_ANNUITY_RATIO'] = df['AMT_CREDIT'] / df['AMT_ANNUITY'] df['NEW_CREDIT_TO_GOODS_RATIO'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE'] df['NEW_DOC_IND_KURT'] = df[docs].kurtosis(axis=1) df['NEW_LIVE_IND_SUM'] = df[live].sum(axis=1) df['NEW_INC_PER_CHLD'] = df['AMT_INCOME_TOTAL'] /(1 + df['CNT_CHILDREN']) df['NEW_INC_BY_ORG'] = df['ORGANIZATION_TYPE'].map(inc_by_org) df['NEW_EMPLOY_TO_BIRTH_RATIO'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH'] df['NEW_ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] /(1 + df['AMT_INCOME_TOTAL']) df['NEW_SOURCES_PROD'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3'] df['NEW_EXT_SOURCES_MEAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1) df['NEW_SCORES_STD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1) df['NEW_SCORES_STD'] = df['NEW_SCORES_STD'].fillna(df['NEW_SCORES_STD'].mean()) df['NEW_CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH'] df['NEW_CAR_TO_EMPLOY_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED'] df['NEW_PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH'] df['NEW_PHONE_TO_BIRTH_RATIO_EMPLOYER'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED'] df['NEW_CREDIT_TO_INCOME_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL'] for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']: df[bin_feature], uniques = pd.factorize(df[bin_feature]) df, cat_cols = one_hot_encoder(df, nan_as_category) dropcolum=['FLAG_DOCUMENT_2','FLAG_DOCUMENT_4', 'FLAG_DOCUMENT_5','FLAG_DOCUMENT_6','FLAG_DOCUMENT_7', 'FLAG_DOCUMENT_8','FLAG_DOCUMENT_9','FLAG_DOCUMENT_10', 'FLAG_DOCUMENT_11','FLAG_DOCUMENT_12','FLAG_DOCUMENT_13', 'FLAG_DOCUMENT_14','FLAG_DOCUMENT_15','FLAG_DOCUMENT_16', 'FLAG_DOCUMENT_17','FLAG_DOCUMENT_18','FLAG_DOCUMENT_19', 'FLAG_DOCUMENT_20','FLAG_DOCUMENT_21'] df= df.drop(dropcolum,axis=1) del test_df gc.collect() return df def bureau_and_balance(num_rows = None, nan_as_category = True): bureau = pd.read_csv('.. /input/bureau.csv', nrows = num_rows) bb = pd.read_csv('.. 
/input/bureau_balance.csv', nrows=num_rows)
    bb, bb_cat = one_hot_encoder(bb, nan_as_category)
    bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
    # Bureau balance: aggregate monthly records, then merge onto bureau.csv
    bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
    for col in bb_cat:
        bb_aggregations[col] = ['mean']
    bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)
    bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist()])
    bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
    bureau.drop(['SK_ID_BUREAU'], axis=1, inplace=True)
    del bb, bb_agg
    gc.collect()
    # Numeric aggregations for bureau and bureau_balance features
    num_aggregations = {
        'DAYS_CREDIT': ['mean', 'var'],
        'DAYS_CREDIT_ENDDATE': ['mean'],
        'DAYS_CREDIT_UPDATE': ['mean'],
        'CREDIT_DAY_OVERDUE': ['mean'],
        'AMT_CREDIT_MAX_OVERDUE': ['mean'],
        'AMT_CREDIT_SUM': ['mean', 'sum'],
        'AMT_CREDIT_SUM_DEBT': ['mean', 'sum'],
        'AMT_CREDIT_SUM_OVERDUE': ['mean'],
        'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
        'AMT_ANNUITY': ['max', 'mean'],
        'CNT_CREDIT_PROLONG': ['sum'],
        'MONTHS_BALANCE_MIN': ['min'],
        'MONTHS_BALANCE_MAX': ['max'],
        'MONTHS_BALANCE_SIZE': ['mean', 'sum']
    }
    # Categorical aggregations (means of the one-hot columns)
    cat_aggregations = {}
    for cat in bureau_cat:
        cat_aggregations[cat] = ['mean']
    for cat in bb_cat:
        cat_aggregations[cat + "_MEAN"] = ['mean']
    bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
    bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist()])
    # Bureau: active credits - numerical aggregations only
    active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
    active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)
    active_agg.columns = pd.Index(['ACTIVE_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist()])
    bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
    del active, active_agg
    gc.collect()
    # Bureau: closed credits - numerical aggregations only
    closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
    closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)
    closed_agg.columns = pd.Index(['CLOSED_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist()])
    bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
    del closed, closed_agg, bureau
    gc.collect()
    return bureau_agg

# Preprocess previous_application.csv
def previous_applications(num_rows=None, nan_as_category=True):
    prev = pd.read_csv('../input/previous_application.csv', nrows=num_rows)
    prev, cat_cols = one_hot_encoder(prev, nan_as_category=True)
    # The sentinel 365243 in the DAYS_* columns means "missing" -> NaN
    prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace=True)
    prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace=True)
    prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace=True)
    prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace=True)
    prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace=True)
    # Add feature: value asked / value received percentage
    prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
    num_aggregations = {
        'AMT_ANNUITY': ['max', 'mean'],
        'AMT_APPLICATION': ['max', 'mean'],
        'AMT_CREDIT': ['max', 'mean'],
        'APP_CREDIT_PERC': ['max', 'mean'],
        'AMT_DOWN_PAYMENT': ['max', 'mean'],
        'AMT_GOODS_PRICE': ['max', 'mean'],
        'HOUR_APPR_PROCESS_START': ['max', 'mean'],
        'RATE_DOWN_PAYMENT': ['max', 'mean'],
        'DAYS_DECISION': ['max', 'mean'],
        'CNT_PAYMENT': ['mean', 'sum'],
    }
    cat_aggregations = {}
    for cat in cat_cols:
        cat_aggregations[cat] = ['mean']
    prev_agg = prev.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
    prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist()])
    # Previous applications: approved applications - numerical features only
    approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
    approved_agg = approved.groupby('SK_ID_CURR').agg(num_aggregations)
    approved_agg.columns = pd.Index(['APPROVED_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist()])
    prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')
    # Previous applications: refused applications - numerical features only
    refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
    refused_agg = refused.groupby('SK_ID_CURR').agg(num_aggregations)
    refused_agg.columns = pd.Index(['REFUSED_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist()])
    prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')
    del refused, refused_agg, approved, approved_agg, prev
    gc.collect()
    return prev_agg

# Preprocess POS_CASH_balance.csv
def pos_cash(num_rows=None, nan_as_category=True):
    pos = pd.read_csv('../input/POS_CASH_balance.csv', nrows=num_rows)
    pos, cat_cols = one_hot_encoder(pos, nan_as_category=True)
    aggregations = {
        'MONTHS_BALANCE': ['max', 'mean', 'size'],
        'SK_DPD': ['max', 'mean'],
        'SK_DPD_DEF': ['max', 'mean']
    }
    for cat in cat_cols:
        aggregations[cat] = ['mean']
    pos_agg = pos.groupby('SK_ID_CURR').agg(aggregations)
    pos_agg.columns = pd.Index(['POS_' + e[0] + "_" + e[1].upper() for e in pos_agg.columns.tolist()])
    # Count POS cash accounts per client
    pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR').size()
    del pos
    gc.collect()
    return pos_agg

# Preprocess installments_payments.csv
def installments_payments(num_rows=None, nan_as_category=True):
    ins = pd.read_csv('../input/installments_payments.csv', nrows=num_rows)
    ins, cat_cols = one_hot_encoder(ins, nan_as_category=True)
    # Percentage and difference paid in each installment (amount paid vs installment value)
    ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
    ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
    # Days past due and days before due (clipped at zero)
    ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
    ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
    ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)
    ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)
    aggregations = {
        'NUM_INSTALMENT_VERSION': ['nunique'],
        'DPD': ['max', 'mean', 'sum', 'min', 'std'],
        'DBD': ['max', 'mean', 'sum', 'min', 'std'],
        'PAYMENT_PERC': ['max', 'mean', 'var', 'min', 'std'],
        'PAYMENT_DIFF': ['max', 'mean', 'var', 'min', 'std'],
        'AMT_INSTALMENT': ['max', 'mean', 'sum', 'min', 'std'],
        'AMT_PAYMENT': ['min', 'max', 'mean', 'sum', 'std'],
        'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum', 'std']
    }
    for cat in cat_cols:
        aggregations[cat] = ['mean']
    ins_agg = ins.groupby('SK_ID_CURR').agg(aggregations)
    ins_agg.columns = pd.Index(['INSTAL_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist()])
    # Count installment accounts per client
    ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR').size()
    del ins
    gc.collect()
    return ins_agg

# Preprocess credit_card_balance.csv
def credit_card_balance(num_rows=None, nan_as_category=True):
    cc = pd.read_csv('../input/credit_card_balance.csv', nrows=num_rows)
    cc, cat_cols = one_hot_encoder(cc, nan_as_category=True)
    # General aggregations over every column
    cc.drop(['SK_ID_PREV'], axis=1, inplace=True)
    cc_agg = cc.groupby('SK_ID_CURR').agg(['max', 'mean', 'sum', 'var'])
    cc_agg.columns = pd.Index(['CC_' + e[0] + "_" + e[1].upper() for e in cc_agg.columns.tolist()])
    # Count credit card lines per client
    cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR').size()
    del cc
    gc.collect()
    return cc_agg

# LightGBM GBDT with KFold or StratifiedKFold
def kfold_lightgbm(df, num_folds, stratified=False, debug=False):
    # Rows with a TARGET are train; rows without are the test set to predict
    train_df = df[df['TARGET'].notnull()]
    test_df = df[df['TARGET'].isnull()]
    print("Starting LightGBM. Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
    del df
    gc.collect()
    if stratified:
        folds = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=47)
    else:
        folds = KFold(n_splits=num_folds, shuffle=True, random_state=47)
    # Arrays and dataframes to store results
    oof_preds = np.zeros(train_df.shape[0])
    sub_preds = np.zeros(test_df.shape[0])
    feature_importance_df = pd.DataFrame()
    feats = [f for f in train_df.columns if f not in ['TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index']]
    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):
        train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
        valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
        clf = LGBMClassifier(
            nthread=4,
            objective='binary',
            n_estimators=1400,
            max_bin=255,
            learning_rate=0.01,
            num_leaves=31,
            min_data_in_leaf=1500,
            feature_fraction=0.7,
            bagging_freq=1,
            bagging_fraction=0.7,
            lambda_l1=1,
            lambda_l2=1,
        )
        clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)],
                eval_metric='auc', verbose=1000, early_stopping_rounds=200)
        oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]
        sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
        fold_importance_df = pd.DataFrame()
        fold_importance_df["feature"] = feats
        fold_importance_df["importance"] = clf.feature_importances_
        fold_importance_df["fold"] = n_fold + 1
        feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
        print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
        del clf, train_x, train_y, valid_x, valid_y
        gc.collect()
    print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))
    # Write submission file and plot feature importance
    if not debug:
        test_df['TARGET'] = sub_preds
        test_df[['SK_ID_CURR', 'TARGET']].to_csv(submission_file_name, index=False)
    display_importances(feature_importance_df)
    return feature_importance_df

# Display/plot feature importance
def display_importances(feature_importance_df_):
    cols = feature_importance_df_[["feature", "importance"]].groupby("feature").mean().sort_values(
        by="importance", ascending=False)[:40].index
    best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
    plt.figure(figsize=(8, 10))
    sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False))
    plt.title('LightGBM Features (avg over folds)')
    plt.tight_layout()
    plt.savefig('lgbm_importances01.png')

def main(debug=False):
    num_rows = 10000 if debug else None
    df = application_train_test(num_rows)
    with timer("Process bureau and bureau_balance"):
        bureau = bureau_and_balance(num_rows)
        print("Bureau df shape:", bureau.shape)
        df = df.join(bureau, how='left', on='SK_ID_CURR')
        del bureau
        gc.collect()
    with timer("Process previous_applications"):
        prev = previous_applications(num_rows)
        print("Previous applications df shape:", prev.shape)
        df = df.join(prev, how='left', on='SK_ID_CURR')
        del prev
        gc.collect()
    with timer("Process POS-CASH balance"):
        pos = pos_cash(num_rows)
        print("Pos-cash balance df shape:", pos.shape)
        df = df.join(pos, how='left', on='SK_ID_CURR')
        del pos
        gc.collect()
    with timer("Process installments payments"):
        ins = installments_payments(num_rows)
        print("Installments payments df shape:", ins.shape)
        df = df.join(ins, how='left', on='SK_ID_CURR')
        del ins
        gc.collect()
    with timer("Process credit card balance"):
        cc = credit_card_balance(num_rows)
        print("Credit card balance df shape:", cc.shape)
        df = df.join(cc, how='left', on='SK_ID_CURR')
        del cc
        gc.collect()
    with timer("Run LightGBM with kfold"):
        feat_importance = kfold_lightgbm(df, num_folds=5, stratified=False, debug=debug)

if __name__ == "__main__":
    submission_file_name = "submission_kerne2.csv"
    with timer("Full model run"):
        main()
Home Credit Default Risk
21,768,463
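Note: the pipeline above also calls one_hot_encoder(), timer(), and application_train_test(), which are presumably defined earlier in the kernel and are not visible in this excerpt. For readability, here is a minimal sketch of the first two, reconstructed only from how they are used above; treat it as an assumption, not the kernel's exact source.

# Hypothetical reconstruction of the helpers the pipeline above assumes.
import time
from contextlib import contextmanager

import pandas as pd

@contextmanager
def timer(title):
    # Print how long the wrapped block took, matching calls like
    # `with timer("Process bureau and bureau_balance"):` above.
    t0 = time.time()
    yield
    print("{} - done in {:.0f}s".format(title, time.time() - t0))

def one_hot_encoder(df, nan_as_category=True):
    # One-hot encode all object columns; return the new frame plus the list of
    # added column names (used above to build the categorical aggregations).
    original_columns = list(df.columns)
    categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
    df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
    new_columns = [c for c in df.columns if c not in original_columns]
    return df, new_columns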
results.to_csv('submission.csv', index=False)<load_from_csv>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
Titanic - Machine Learning from Disaster
21,768,463
from sklearn.neighbors import KNeighborsRegressor  # import needed for the model below

X_train = np.loadtxt("/kaggle/input/roxie2/roxie_train_features.csv", delimiter=",")[:, 1:]
X_test = np.loadtxt("/kaggle/input/roxie2/roxie_test_features.csv", delimiter=",")
ids_test = X_test[:, (0,)]  # keep the ID column as a 2-D slice
X_test = X_test[:, 1:]
y_train = np.loadtxt("/kaggle/input/roxie2/roxie_train_values.csv", delimiter=",", ndmin=2)[:, (1,)]
mdl = KNeighborsRegressor()
mdl.fit(X_train, y_train)
y_pred = mdl.predict(X_test)<save_to_csv>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
Titanic - Machine Learning from Disaster
21,768,463
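Aside: the KNeighborsRegressor above is fit on raw feature columns. Distance-based models are usually sensitive to feature scale, so a scaled variant may score better. The sketch below assumes the same X_train, y_train, and X_test arrays and is an optional alternative, not part of the original kernel.

# Sketch: scale features before KNN, since Euclidean distance is scale-sensitive.
from sklearn.neighbors import KNeighborsRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

mdl = make_pipeline(StandardScaler(), KNeighborsRegressor(n_neighbors=5))
mdl.fit(X_train, y_train.ravel())        # ravel() avoids sklearn's column-vector warning
y_pred = mdl.predict(X_test).reshape(-1, 1)  # back to a column for the concatenate step below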
output = np.concatenate((ids_test, y_pred), axis=1)
# comments='' stops np.savetxt from prefixing the header line with '#'
np.savetxt("submission_knn.csv", output, delimiter=",", fmt='%i,%1.4f', header='ID,intensity', comments='')<load_from_csv>
women = train_data.loc[train_data.Sex == 'female']["Survived"]
rate_women = sum(women) / len(women)
print("% of women who survived:", rate_women)
Titanic - Machine Learning from Disaster
21,768,463
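One caveat worth knowing about np.savetxt: unless comments='' is passed (as added in the cell above), it prefixes the header with '#', so the file would start with '# ID,intensity', which most submission parsers reject. An equivalent pandas version of the same write, assuming the same ids_test and y_pred arrays, is sketched below.

# Sketch of the same submission file written via pandas (hypothetical alternative).
import numpy as np
import pandas as pd

sub = pd.DataFrame({
    "ID": ids_test.astype(int).ravel(),        # integer row ids
    "intensity": np.round(y_pred.ravel(), 4),  # match the %1.4f formatting above
})
sub.to_csv("submission_knn.csv", index=False)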
X_full = np.loadtxt("/kaggle/input/roxie2/roxie_full_features.csv", delimiter=",")
y_pred = mdl.predict(X_full)
plt.figure(figsize=(10, 15))
plt.imshow(y_pred.reshape((650, 430, 3)) / 255)<set_options>
men = train_data.loc[train_data.Sex == 'male']["Survived"]
rate_men = sum(men) / len(men)
print("% of men who survived:", rate_men)
Titanic - Machine Learning from Disaster
21,768,463
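A small caveat on the imshow call above: a regressor can predict values outside [0, 255], and matplotlib clips out-of-range RGB floats with a warning. A defensive variant, assuming the same mdl and X_full as above, would be:

# Sketch: clip predictions into the valid [0, 1] display range before plotting.
import numpy as np
import matplotlib.pyplot as plt

img = np.clip(mdl.predict(X_full).reshape((650, 430, 3)) / 255.0, 0.0, 1.0)
plt.figure(figsize=(10, 15))
plt.imshow(img)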
%config InlineBackend.figure_format = 'retina'
%matplotlib inline<load_from_csv>
y = train_data["Survived"] features = ["Pclass", "Sex", "SibSp", "Parch"] X = pd.get_dummies(train_data[features]) X_test = pd.get_dummies(test_data[features]) model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model.fit(X, y) predictions = model.predict(X_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('submission.csv', index=False) print("Your submission was successfully saved!" )
Titanic - Machine Learning from Disaster
21,768,463
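The random forest above goes straight to a submission file without a local score. A quick sanity check with 5-fold cross-validation, reusing the same X, y, and hyperparameters from the cell above, would look like this:

# Sketch: estimate out-of-sample accuracy before submitting.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

clf = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
scores = cross_val_score(clf, X, y, cv=5, scoring="accuracy")
print("CV accuracy: %.3f +/- %.3f" % (scores.mean(), scores.std()))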
print(os.listdir(".. /input")) local = 0 if(local): train = pd.read_csv("input/train.csv") test = pd.read_csv("input/test.csv") else: train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv") <drop_column>
print("Before", train_data.shape, test_data.shape) train_data = train_data.drop(['Ticket', 'Cabin'], axis=1) test_data = test_data.drop(['Ticket', 'Cabin'], axis=1) combine = [train_data, test_data] "After", train_data.shape, test_data.shape
Titanic - Machine Learning from Disaster
21,768,463
train_clean = train.drop(['ID', 'price'], axis=1)
test_clean = test.drop('ID', axis=1)
test_clean.head()
train_clean.shape<concatenate>
combine = [train_data, test_data]
Titanic - Machine Learning from Disaster
21,768,463
all_data = pd.concat((train_clean[:], test_clean[:]))
all_data["caret_sqroot"] = np.sqrt(all_data["carat"])
all_data["caret_cubtroot"] = all_data.carat ** (1 / 3)
all_data.shape<feature_engineering>
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(r'([A-Za-z]+)\.', expand=False)

pd.crosstab(train_data['Title'], train_data['Sex'])
Titanic - Machine Learning from Disaster
21,768,463
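The str.extract pattern above grabs the first run of letters that is immediately followed by a period, i.e. the honorific. A tiny self-contained example of what it captures:

# Sketch: what the title regex above actually extracts.
import pandas as pd

names = pd.Series([
    "Braund, Mr. Owen Harris",
    "Heikkinen, Miss. Laina",
    "Futrelle, Mrs. Jacques Heath (Lily May Peel)",
])
print(names.str.extract(r'([A-Za-z]+)\.', expand=False))
# 0      Mr
# 1    Miss
# 2     Mrs
# dtype: object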
train["logprice"] = np.log1p(train["price"]) numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index print(numeric_feats )<feature_engineering>
for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col',
                                                 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')

train_data[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
Titanic - Machine Learning from Disaster
21,768,463
<categorify>
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)

train_data.head()
Titanic - Machine Learning from Disaster
21,768,463
all_data = pd.get_dummies(all_data)
all_data.head()<data_type_conversions>
train_data = train_data.drop(['Name', 'PassengerId'], axis=1)
test_data = test_data.drop(['Name'], axis=1)
combine = [train_data, test_data]
train_data.shape, test_data.shape
Titanic - Machine Learning from Disaster
21,768,463
all_data = all_data.fillna(all_data.mean())<prepare_x_and_y>
for dataset in combine:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)

train_data.head()
Titanic - Machine Learning from Disaster
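The <prepare_x_and_y> cell that follows the diamond kernel's fillna step is not included in this excerpt. A plausible sketch, assuming all_data stacks train_clean on top of test_clean and that train["logprice"] was created earlier, would split the frame back apart like this (hypothetical reconstruction, not the kernel's actual cell):

# Hypothetical <prepare_x_and_y> step: recover train/test matrices from all_data.
X_train = all_data[:train.shape[0]]   # first len(train) rows came from train_clean
X_test = all_data[train.shape[0]:]    # remaining rows came from test_clean
y = train["logprice"]                 # log-transformed target built earlier
print(X_train.shape, X_test.shape, y.shape)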