kernel_id · int64 · values 24.2k – 23.3M
prompt · string · lengths 8 – 1.85M
completetion · string · lengths 1 – 182k
comp_name · string · lengths 5 – 57
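Read as a table, each record below pairs a kernel_id and comp_name with a prompt cell (whose text ends in a fused snippet-type tag such as <correct_missing_values>) and the completion cell that follows it. A minimal sketch of loading and inspecting such records, assuming the table is available as a CSV export; the file name rows.csv is a placeholder, since the real export location is not given here:

import pandas as pd

rows = pd.read_csv('rows.csv')  # placeholder path: substitute the actual export of this table
print(rows.dtypes)  # kernel_id is int64; prompt, completetion, comp_name load as object
# Each prompt ends with a snippet-type tag; split it off to recover (code, tag) pairs.
rows['tag'] = rows['prompt'].str.extract(r'<([a-z_]+)>\s*$', expand=False)
rows['prompt_code'] = rows['prompt'].str.replace(r'<[a-z_]+>\s*$', '', regex=True)
print(rows[['kernel_id', 'tag', 'comp_name']].head())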
1,457,238
column_with_missing = [col for col in X_test.columns if X_test[col].isnull().any()]<correct_missing_values>
dataset['AMT_REQ_CREDIT_BUREAU_HOUR_outlier'] = dataset['AMT_REQ_CREDIT_BUREAU_HOUR'] > 1
for i in dataset['AMT_REQ_CREDIT_BUREAU_HOUR']:
    if i > 1:
        dataset['AMT_REQ_CREDIT_BUREAU_HOUR'].replace({i: np.nan}, inplace = True)
Home Credit Default Risk
1,457,238
X_test.dropna(axis=0, subset=['ID'], inplace=True)<drop_column>
dataset['AMT_REQ_CREDIT_BUREAU_DAY_outlier'] = dataset['AMT_REQ_CREDIT_BUREAU_DAY'] > 2
for i in dataset['AMT_REQ_CREDIT_BUREAU_DAY']:
    if i > 2:
        dataset['AMT_REQ_CREDIT_BUREAU_DAY'].replace({i: np.nan}, inplace = True)
Home Credit Default Risk
1,457,238
X_test.drop(['ID'], axis=1, inplace=True)<predict_on_test>
dataset['AMT_REQ_CREDIT_BUREAU_WEEK_outlier'] = dataset['AMT_REQ_CREDIT_BUREAU_WEEK'] > 2
for i in dataset['AMT_REQ_CREDIT_BUREAU_WEEK']:
    if i > 2:
        dataset['AMT_REQ_CREDIT_BUREAU_WEEK'].replace({i: np.nan}, inplace = True)
Home Credit Default Risk
1,457,238
preds_test = model.predict(X_test)<save_to_csv>
dataset['AMT_REQ_CREDIT_BUREAU_MON_outlier'] = dataset['AMT_REQ_CREDIT_BUREAU_MON'] > 5
for i in dataset['AMT_REQ_CREDIT_BUREAU_MON']:
    if i > 5:
        dataset['AMT_REQ_CREDIT_BUREAU_MON'].replace({i: np.nan}, inplace = True)
Home Credit Default Risk
1,457,238
output = pd.DataFrame({'ID': X_test.index+1, 'Revenue': preds_test})
output.to_csv('new_submission.csv', index=None, header=True)<feature_engineering>
dataset['AMT_REQ_CREDIT_BUREAU_QRT_outlier'] = dataset['AMT_REQ_CREDIT_BUREAU_QRT'] > 5
for i in dataset['AMT_REQ_CREDIT_BUREAU_QRT']:
    if i > 5:
        dataset['AMT_REQ_CREDIT_BUREAU_QRT'].replace({i: np.nan}, inplace = True)
Home Credit Default Risk
1,457,238
X_set['exit'] = X_set['ExitRates'].apply(newly)
X_test['exit'] = X_test['ExitRates'].apply(newly)<save_to_csv>
dataset['AMT_REQ_CREDIT_BUREAU_YEAR_outlier'] = dataset['AMT_REQ_CREDIT_BUREAU_YEAR'] > 10
for i in dataset['AMT_REQ_CREDIT_BUREAU_YEAR']:
    if i > 10:
        dataset['AMT_REQ_CREDIT_BUREAU_YEAR'].replace({i: np.nan}, inplace = True)
Home Credit Default Risk
1,457,238
train_with_4 = pd.read_csv('../input/train.csv')
train_with_4['target'] = train_with_4['target']/4
train_with_4.to_csv('train.csv', index = False)<load_from_csv>
dataset['EMPLOY_AGE'] = dataset['DAYS_EMPLOYED'] / dataset['DAYS_BIRTH']
dataset['INCOME_AGE'] = dataset['AMT_INCOME_TOTAL'] / dataset['DAYS_BIRTH']
dataset['CREDIT_AGE'] = dataset['AMT_CREDIT'] / dataset['DAYS_BIRTH']
dataset['CREDIT_INCOME'] = dataset['AMT_CREDIT'] / dataset['AMT_INCOME_TOTAL']
dataset['ANNUITY_INCOME'] = dataset['AMT_ANNUITY'] / dataset['AMT_INCOME_TOTAL']
dataset['ANNUITY_CREDIT'] = dataset['AMT_ANNUITY'] / dataset['AMT_CREDIT']
Home Credit Default Risk
1,457,238
train_with_1 = pd.read_csv('train.csv')
train_with_1.head()<categorify>
bureau = pd.read_csv('../input/bureau.csv')
bureau.head()
Home Credit Default Risk
1,457,238
def generate_bigrams(x):
    n_grams = set(zip(*[x[i:] for i in range(2)]))
    for n_gram in n_grams:
        x.append(' '.join(n_gram))
    return x<categorify>
dataset = dataset.merge(BUREAU_count, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_count'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
generate_bigrams(['This', 'film', 'is', 'terrible'])<prepare_x_and_y>
DAYS_CREDIT_max = bureau.groupby('SK_ID_CURR', as_index=False)['DAYS_CREDIT'].max().rename(columns = {'DAYS_CREDIT': 'bureau_DAYS_CREDIT_max'})
DAYS_CREDIT_max.head()
Home Credit Default Risk
1,457,238
SEED = 1234
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
TEXT = data.Field(preprocessing = generate_bigrams)
TARGET = data.LabelField(dtype = torch.float)<define_variables>
dataset = dataset.merge(DAYS_CREDIT_max, on = 'SK_ID_CURR', how = 'left')
Home Credit Default Risk
1,457,238
fields_train = [(None, None),(None, None),(None, None),('text', TEXT),('target', TARGET)]<load_from_csv>
CREDIT_DAY_OVERDUE_max = bureau.groupby('SK_ID_CURR', as_index=False)['CREDIT_DAY_OVERDUE'].max().rename(columns = {'CREDIT_DAY_OVERDUE': 'bureau_CREDIT_DAY_OVERDUE_max'})
Home Credit Default Risk
1,457,238
train_data = data.TabularDataset(path = 'train.csv', format = 'csv', fields = fields_train, skip_header = True)<split>
CREDIT_DAY_OVERDUE_max[CREDIT_DAY_OVERDUE_max['bureau_CREDIT_DAY_OVERDUE_max'] > 0].count()
Home Credit Default Risk
1,457,238
train_data, valid_data = train_data.split(random_state = random.seed(SEED))<categorify>
CREDIT_DAY_OVERDUE_max['bureau_CREDIT_DAY_OVERDUE_max_flag'] = CREDIT_DAY_OVERDUE_max['bureau_CREDIT_DAY_OVERDUE_max'].where(CREDIT_DAY_OVERDUE_max['bureau_CREDIT_DAY_OVERDUE_max']==0, other=1)
Home Credit Default Risk
1,457,238
MAX_VOCAB_SIZE = 25_000
TEXT.build_vocab(train_data, max_size = MAX_VOCAB_SIZE, vectors = "glove.6B.100d", unk_init = torch.Tensor.normal_)
TARGET.build_vocab(train_data)<define_variables>
CREDIT_DAY_OVERDUE_max = CREDIT_DAY_OVERDUE_max.drop('bureau_CREDIT_DAY_OVERDUE_max', axis=1)
dataset = dataset.merge(CREDIT_DAY_OVERDUE_max, on = 'SK_ID_CURR', how = 'left')
Home Credit Default Risk
1,457,238
BATCH_SIZE = 64
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_iterator = data.Iterator(dataset = train_data, batch_size = BATCH_SIZE, device = device, shuffle = None, train = True, sort_key = lambda x: len(x.text), sort_within_batch = False)
valid_iterator = data.Iterator(dataset = valid_data, batch_size = BATCH_SIZE, device = device, shuffle = None, train = False, sort_key = lambda x: len(x.text), sort_within_batch = False)<categorify>
DAYS_CREDIT_ENDDATE_max['bureau_DAYS_CREDIT_ENDDATE_max_outlier'] = DAYS_CREDIT_ENDDATE_max['bureau_DAYS_CREDIT_ENDDATE_max'] < -10000
for i in DAYS_CREDIT_ENDDATE_max['bureau_DAYS_CREDIT_ENDDATE_max']:
    if i < -10000:
        DAYS_CREDIT_ENDDATE_max['bureau_DAYS_CREDIT_ENDDATE_max'].replace({i: np.nan}, inplace = True)
Home Credit Default Risk
1,457,238
class FastText(nn.Module):
    def __init__(self, vocab_size, embedding_dim, output_dim, pad_idx):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)
        self.fc = nn.Linear(embedding_dim, output_dim)

    def forward(self, text):
        embedded = self.embedding(text)
        embedded = embedded.permute(1, 0, 2)
        pooled = F.avg_pool2d(embedded, (embedded.shape[1], 1)).squeeze(1)
        return self.fc(pooled)<categorify>
dataset = dataset.merge(DAYS_CREDIT_ENDDATE_max, on = 'SK_ID_CURR', how = 'left')
Home Credit Default Risk
1,457,238
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
OUTPUT_DIM = 1
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
model = FastText(INPUT_DIM, EMBEDDING_DIM, OUTPUT_DIM, PAD_IDX)<find_best_params>
DAYS_ENDDATE_FACT_mean['bureau_DAYS_ENDDATE_FACT_mean_outlier'] = DAYS_ENDDATE_FACT_mean['bureau_DAYS_ENDDATE_FACT_mean'] < -4000
for i in DAYS_ENDDATE_FACT_mean['bureau_DAYS_ENDDATE_FACT_mean']:
    if i < -4000:
        DAYS_ENDDATE_FACT_mean['bureau_DAYS_ENDDATE_FACT_mean'].replace({i: np.nan}, inplace = True)
Home Credit Default Risk
1,457,238
def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

print(f'The model has {count_parameters(model):,} trainable parameters')<load_pretrained>
dataset = dataset.merge(DAYS_ENDDATE_FACT_mean, on = 'SK_ID_CURR', how = 'left')
Home Credit Default Risk
1,457,238
pretrained_embeddings = TEXT.vocab.vectors
model.embedding.weight.data.copy_(pretrained_embeddings)<feature_engineering>
dataset = dataset.merge(AMT_CREDIT_MAX_OVERDUE_max, on = 'SK_ID_CURR', how = 'left')
dataset = dataset.merge(AMT_CREDIT_MAX_OVERDUE_mean, on = 'SK_ID_CURR', how = 'left')
Home Credit Default Risk
1,457,238
UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]
model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)
model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)<choose_model_class>
dataset = dataset.merge(CNT_CREDIT_PROLONG_sum, on = 'SK_ID_CURR', how = 'left')
Home Credit Default Risk
1,457,238
optimizer = optim.Adam(model.parameters())<find_best_params>
dataset = dataset.merge(AMT_CREDIT_SUM_mean, on = 'SK_ID_CURR', how = 'left')
Home Credit Default Risk
1,457,238
criterion = nn.BCEWithLogitsLoss()
model = model.to(device)
criterion = criterion.to(device)<compute_test_metric>
dataset = dataset.merge(AMT_CREDIT_SUM_DEBT_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_AMT_CREDIT_SUM_DEBT_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
def binary_accuracy(preds, y):
    rounded_preds = torch.round(torch.sigmoid(preds))
    correct = (rounded_preds == y).float()
    acc = correct.sum() / len(correct)
    return acc<train_model>
dataset = dataset.merge(AMT_CREDIT_SUM_LIMIT_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_AMT_CREDIT_SUM_LIMIT_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
def train(model, iterator, optimizer, criterion):
    epoch_loss = 0
    epoch_acc = 0
    model.train()
    for batch in iterator:
        optimizer.zero_grad()
        predictions = model(batch.text).squeeze(1)
        loss = criterion(predictions, batch.target)
        acc = binary_accuracy(predictions, batch.target)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)<compute_train_metric>
dataset = dataset.merge(AMT_CREDIT_SUM_OVERDUE_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_AMT_CREDIT_SUM_OVERDUE_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
def evaluate(model, iterator, criterion):
    epoch_loss = 0
    epoch_acc = 0
    model.eval()
    with torch.no_grad():
        for batch in iterator:
            predictions = model(batch.text).squeeze(1)
            loss = criterion(predictions, batch.target)
            acc = binary_accuracy(predictions, batch.target)
            epoch_loss += loss.item()
            epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)<train_model>
dataset = dataset.merge(DAYS_CREDIT_UPDATE_max, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_DAYS_CREDIT_UPDATE_max'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
def epoch_time(start_time, end_time):
    elapsed_time = end_time - start_time
    elapsed_mins = int(elapsed_time / 60)
    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
    return elapsed_mins, elapsed_secs<train_model>
dataset = dataset.merge(AMT_ANNUITY_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_AMT_ANNUITY_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
N_EPOCHS = 20
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'tut3-model.pt')
    print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')<predict_on_test>
bureau_cats = pd.get_dummies(bureau.select_dtypes('object'))
bureau_cats['SK_ID_CURR'] = bureau['SK_ID_CURR']
bureau_cats.head()
Home Credit Default Risk
1,457,238
def predict_sentiment(model, sentence):
    model.eval()
    tokenized = generate_bigrams(sentence.split())
    indexed = [TEXT.vocab.stoi[t] for t in tokenized]
    tensor = torch.LongTensor(indexed).to(device)
    tensor = tensor.unsqueeze(1)
    prediction = torch.sigmoid(model(tensor))
    return prediction.item()<save_to_csv>
bureau_cats_grouped = bureau_cats.groupby('SK_ID_CURR').agg('sum')
bureau_cats_grouped.head()
Home Credit Default Risk
1,457,238
preds = []
test_data = pd.read_csv('../input/test.csv')
for i in range(len(test_data)):
    preds.append((int(predict_sentiment(model, test_data['text'][i]) > 0.5)) * 4)
ids = test_data['Id']
dict = {'Id': ids, 'target': preds}
df = pd.DataFrame(dict)
df.to_csv('final_output.csv', index=False)<load_from_csv>
dataset = dataset.merge(bureau_cats_grouped, left_on = 'SK_ID_CURR', right_index = True, how = 'left')
dataset.head()
Home Credit Default Risk
1,457,238
test_data = pd.read_csv('final_output.csv')
test_data.head()<categorify>
bureau_balance = pd.read_csv('../input/bureau_balance.csv')
bureau_balance.head()
Home Credit Default Risk
1,457,238
class VowelConsonantDataset(Dataset):
    def __init__(self, file_path, train=True, transform=None):
        self.transform = transform
        self.file_path = file_path
        self.train = train
        self.file_names = [file for _, _, files in os.walk(self.file_path) for file in files]
        self.len = len(self.file_names)
        if self.train:
            self.classes_mapping = self.get_classes()

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, index):
        file_name = self.file_names[index]
        image_data = self.pil_loader(self.file_path + "/" + file_name)
        if self.transform:
            image_data = self.transform(image_data)
        if self.train:
            file_name_splitted = file_name.split("_")
            Y1 = self.classes_mapping[file_name_splitted[0]]
            Y2 = self.classes_mapping[file_name_splitted[1]]
            z1, z2 = torch.zeros(10), torch.zeros(10)
            z1[Y1-10], z2[Y2] = 1, 1
            label = torch.stack([z1, z2])
            return image_data, label
        else:
            return image_data, file_name

    def pil_loader(self, path):
        with open(path, 'rb') as f:
            img = Image.open(f)
            return img.convert('RGB')

    def get_classes(self):
        classes = []
        for name in self.file_names:
            name_splitted = name.split("_")
            classes.extend([name_splitted[0], name_splitted[1]])
        classes = list(set(classes))
        classes_mapping = {}
        for i, cl in enumerate(sorted(classes)):
            classes_mapping[cl] = i
        return classes_mapping<set_options>
MONTHS_BAL = MONTHS_BALANCE_mean.merge(MONTHS_BALANCE_count, on = 'SK_ID_BUREAU', how = 'inner')
MONTHS_BAL.head()
Home Credit Default Risk
1,457,238
train_on_gpu = torch.cuda.is_available()<categorify>
bureau_bal_cats = pd.get_dummies(bureau_balance.select_dtypes('object'))
bureau_bal_cats['SK_ID_BUREAU'] = bureau_balance['SK_ID_BUREAU']
bureau_bal_cats.head()
Home Credit Default Risk
1,457,238
transform_train = transforms.Compose([
    transforms.Resize(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])<load_pretrained>
bureau_bal_cats_grouped = bureau_bal_cats.groupby('SK_ID_BUREAU').agg('sum')
bureau_bal_cats_grouped.head()
Home Credit Default Risk
1,457,238
full_data = VowelConsonantDataset("../input/train/train", train=True, transform=transform_train)
train_size = int(0.95 * len(full_data))
test_size = len(full_data) - train_size
train_data, validation_data = random_split(full_data, [train_size, test_size])
train_loader = torch.utils.data.DataLoader(train_data, batch_size=20, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=20, shuffle=True)<create_dataframe>
bureau_bal_merged = MONTHS_BAL.merge(bureau_bal_cats_grouped, right_index = True, left_on = 'SK_ID_BUREAU', how = 'outer')
bureau_bal_merged = bureau_bal_merged.merge(bureau[['SK_ID_BUREAU', 'SK_ID_CURR']], on = 'SK_ID_BUREAU', how = 'left')
bureau_bal_merged.head()
Home Credit Default Risk
1,457,238
test_data = VowelConsonantDataset("../input/test/test", train=False, transform=transform_train)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=20, shuffle=False)<set_options>
bureau_bal_MONTHS_BALANCE_mean_mean = bureau_bal_merged.groupby('SK_ID_CURR', as_index=False)['bureau_bal_MONTHS_BALANCE_mean'].mean().rename(columns = {'bureau_bal_MONTHS_BALANCE_mean': 'bureau_bal_MONTHS_BALANCE_mean_mean'})
dataset = dataset.merge(bureau_bal_MONTHS_BALANCE_mean_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_bal_MONTHS_BALANCE_mean_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)<choose_model_class>
bureau_bal_MONTHS_BALANCE_count_mean = bureau_bal_merged.groupby('SK_ID_CURR', as_index=False)['bureau_bal_MONTHS_BALANCE_count'].mean().rename(columns = {'bureau_bal_MONTHS_BALANCE_count': 'bureau_bal_MONTHS_BALANCE_count_mean'})
dataset = dataset.merge(bureau_bal_MONTHS_BALANCE_count_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_bal_MONTHS_BALANCE_count_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
class MyModel(nn.Module):
    def __init__(self, num_classes1, num_classes2):
        super(MyModel, self).__init__()
        self.model_resnet = models.resnet50(pretrained=True)
        num_ftrs = self.model_resnet.fc.in_features
        self.model_resnet.fc = nn.Sequential()
        self.fc1 = nn.Sequential(
            nn.BatchNorm1d(num_ftrs),
            nn.Dropout(0.3),
            nn.Linear(in_features=num_ftrs, out_features=1000, bias=True),
            nn.ReLU(),
            nn.BatchNorm1d(1000, eps=1e-05, momentum=0.3),
            nn.Dropout(0.3),
            nn.Linear(in_features=1000, out_features=10, bias=True))
        self.fc2 = nn.Sequential(
            nn.BatchNorm1d(num_ftrs),
            nn.Dropout(0.3),
            nn.Linear(in_features=num_ftrs, out_features=1000, bias=True),
            nn.ReLU(),
            nn.BatchNorm1d(1000, eps=1e-05, momentum=0.3),
            nn.Dropout(0.3),
            nn.Linear(in_features=1000, out_features=10, bias=True))

    def forward(self, x):
        x = self.model_resnet(x)
        out1 = self.fc1(x)
        out2 = self.fc2(x)
        return out1, out2<choose_model_class>
bureau_bal_STATUS_0_mean = bureau_bal_merged.groupby('SK_ID_CURR', as_index=False)['STATUS_0'].mean().rename(columns = {'STATUS_0': 'bureau_bal_STATUS_0_mean'})
dataset = dataset.merge(bureau_bal_STATUS_0_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_bal_STATUS_0_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
my_model = MyModel(10, 10)<train_model>
bureau_bal_STATUS_1_mean = bureau_bal_merged.groupby('SK_ID_CURR', as_index=False)['STATUS_1'].mean().rename(columns = {'STATUS_1': 'bureau_bal_STATUS_1_mean'})
dataset = dataset.merge(bureau_bal_STATUS_1_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_bal_STATUS_1_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
my_model = my_model.to(device)<categorify>
bureau_bal_STATUS_2_mean = bureau_bal_merged.groupby('SK_ID_CURR', as_index=False)['STATUS_2'].mean().rename(columns = {'STATUS_2': 'bureau_bal_STATUS_2_mean'})
dataset = dataset.merge(bureau_bal_STATUS_2_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_bal_STATUS_2_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
def evaluation(dataloader, model):
    total, correct = 0, 0
    for data in dataloader:
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        out1, out2 = my_model(inputs)
        _, pred1 = torch.max(out1.data, 1)
        _, pred2 = torch.max(out2.data, 1)
        _, labels1 = torch.max(labels[:, 0, :].data, 1)
        _, labels2 = torch.max(labels[:, 1, :].data, 1)
        total += labels.size(0)
        fin1 = (pred1 == labels1)
        fin2 = (pred2 == labels2)
        # count a sample as correct only when both heads are right
        correct += (fin1 & fin2).sum().item()
    return 100 * correct / total<choose_model_class>
bureau_bal_STATUS_3_mean = bureau_bal_merged.groupby('SK_ID_CURR', as_index=False)['STATUS_3'].mean().rename(columns = {'STATUS_3': 'bureau_bal_STATUS_3_mean'})
dataset = dataset.merge(bureau_bal_STATUS_3_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_bal_STATUS_3_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
loss_fn = nn.CrossEntropyLoss()
opt = optim.SGD(my_model.parameters(), lr=0.01, momentum=0.9)<train_model>
bureau_bal_STATUS_4_mean = bureau_bal_merged.groupby('SK_ID_CURR', as_index=False)['STATUS_4'].mean().rename(columns = {'STATUS_4': 'bureau_bal_STATUS_4_mean'})
dataset = dataset.merge(bureau_bal_STATUS_4_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_bal_STATUS_4_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
loss_epoch_arr = []
loss_arr = []
max_epochs = 6
min_loss = 1000
batch_size = 32
n_iters = np.ceil(9000/batch_size)
for epoch in range(max_epochs):
    for i, data in enumerate(train_loader, 0):
        my_model.train()
        images, labels = data
        images = images.to(device)
        targetnp = labels[:, 0, :].cpu().numpy()
        targetnp1 = labels[:, 1, :].cpu().numpy()
        with torch.no_grad():
            new_targets1 = np.argmax(targetnp, axis=1)
            new_targets2 = np.argmax(targetnp1, axis=1)
            new_targets1 = torch.LongTensor(new_targets1)
            new_targets2 = torch.LongTensor(new_targets2)
            new_targets1 = new_targets1.to(device)
            new_targets2 = new_targets2.to(device)
        opt.zero_grad()
        out = my_model.forward(images)
        loss_fc1 = loss_fn(out[0], new_targets1)
        loss_fc2 = loss_fn(out[1], new_targets2)
        loss = torch.add(loss_fc1, loss_fc2)
        loss.backward()
        opt.step()
        if min_loss > loss.item():
            min_loss = loss.item()
            best_model = copy.deepcopy(my_model.state_dict())
            print('Min loss %0.2f' % min_loss)
        if i % 100 == 0:
            print('Iteration: %d/%d, Loss: %0.2f' % (i, n_iters, loss.item()))
        del images, labels, out
        torch.cuda.empty_cache()
        loss_arr.append(loss.item())
    print("Epoch number :", epoch)
    print("Train Accuracy :", evaluation(train_loader, my_model))
    print("Test Accuracy :", evaluation(validation_loader, my_model))
    loss_epoch_arr.append(loss.item())
plt.plot(loss_arr)
plt.show()<compute_test_metric>
bureau_bal_STATUS_5_mean = bureau_bal_merged.groupby('SK_ID_CURR', as_index=False)['STATUS_5'].mean().rename(columns = {'STATUS_5': 'bureau_bal_STATUS_5_mean'})
dataset = dataset.merge(bureau_bal_STATUS_5_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_bal_STATUS_5_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
print(evaluation(validation_loader,my_model))<find_best_params>
bureau_bal_STATUS_C_mean = bureau_bal_merged.groupby('SK_ID_CURR', as_index=False)['STATUS_C'].mean().rename(columns = {'STATUS_C': 'bureau_bal_STATUS_C_mean'})
dataset = dataset.merge(bureau_bal_STATUS_C_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_bal_STATUS_C_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
my_model.eval()
plist = []
fn_list = []
for inputs_test, fn in test_loader:
    inputs_test = inputs_test.to(device)
    out1, out2 = my_model.forward(inputs_test)
    _, pred1 = torch.max(out1, 1)
    pred1 = pred1.tolist()
    _, pred2 = torch.max(out2, 1)
    pred2 = pred2.tolist()
    for x, y, z in zip(pred1, pred2, fn):
        p = "V" + str(x) + "_" + "C" + str(y)
        plist.append(p)
        fn_list.append(z)<save_to_csv>
bureau_bal_STATUS_X_mean = bureau_bal_merged.groupby('SK_ID_CURR', as_index=False)['STATUS_X'].mean().rename(columns = {'STATUS_X': 'bureau_bal_STATUS_X_mean'})
dataset = dataset.merge(bureau_bal_STATUS_X_mean, on = 'SK_ID_CURR', how = 'left')
corr = dataset['TARGET'].corr(dataset['bureau_bal_STATUS_X_mean'])
print('%.4f' % corr)
Home Credit Default Risk
1,457,238
submission = pd.DataFrame({"ImageId": fn_list, "Class": plist})
submission.to_csv('submission.csv', index=False)<load_from_csv>
gc.enable()
del bureau, BUREAU_count, DAYS_CREDIT_max, CREDIT_DAY_OVERDUE_max, DAYS_CREDIT_ENDDATE_max, DAYS_ENDDATE_FACT_mean, AMT_CREDIT_MAX_OVERDUE_max, AMT_CREDIT_MAX_OVERDUE_mean, CNT_CREDIT_PROLONG_sum, AMT_CREDIT_SUM_mean, AMT_CREDIT_SUM_DEBT_mean, AMT_CREDIT_SUM_LIMIT_mean, AMT_CREDIT_SUM_OVERDUE_mean, DAYS_CREDIT_UPDATE_max, AMT_ANNUITY_mean, bureau_cats, bureau_cats_grouped, bureau_balance, MONTHS_BALANCE_count, MONTHS_BALANCE_mean, MONTHS_BAL, bureau_bal_cats, bureau_bal_cats_grouped, bureau_bal_merged, bureau_bal_MONTHS_BALANCE_mean_mean, bureau_bal_MONTHS_BALANCE_count_mean, bureau_bal_STATUS_0_mean, bureau_bal_STATUS_1_mean, bureau_bal_STATUS_2_mean, bureau_bal_STATUS_3_mean, bureau_bal_STATUS_4_mean, bureau_bal_STATUS_5_mean, bureau_bal_STATUS_C_mean, bureau_bal_STATUS_X_mean
gc.collect()
Home Credit Default Risk
1,457,238
%matplotlib inline
warnings.filterwarnings("ignore", category=DeprecationWarning)
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df = df_train.append(df_test, ignore_index = True)
df_train.shape, df_test.shape, df_train.columns.values<count_missing_values>
credit = pd.read_csv('../input/credit_card_balance.csv')
credit.head()
Home Credit Default Risk
1,457,238
df['Pclass'].isnull().sum(axis=0)<groupby>
credit_stats_by_prev = credit[['SK_ID_PREV', 'SK_ID_CURR']]
Home Credit Default Risk
1,457,238
df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean()<count_values>
credit_MONTHS_BALANCE_count = credit.groupby('SK_ID_PREV', as_index=False)['MONTHS_BALANCE'].count().rename(columns = {'MONTHS_BALANCE': 'credit_MONTHS_BALANCE_count'})
credit_MONTHS_BALANCE_mean = credit.groupby('SK_ID_PREV', as_index=False)['MONTHS_BALANCE'].mean().rename(columns = {'MONTHS_BALANCE': 'credit_MONTHS_BALANCE_mean'})
Home Credit Default Risk
1,457,238
df['Title'] = df.Name.map(lambda x: x.split(',')[1].split('.')[0].strip())
df['Title'].value_counts()<feature_engineering>
credit_stats_by_prev = credit_stats_by_prev.merge(credit_MONTHS_BALANCE_count, on = 'SK_ID_PREV', how = 'left')
credit_stats_by_prev = credit_stats_by_prev.merge(credit_MONTHS_BALANCE_mean, on = 'SK_ID_PREV', how = 'left')
credit_stats_by_prev.head()
Home Credit Default Risk
1,457,238
df['Title'] = df['Title'].replace('Mlle', 'Miss')
df['Title'] = df['Title'].replace(['Mme', 'Lady', 'Ms'], 'Mrs')
df.Title.loc[(df.Title != 'Master') & (df.Title != 'Mr') & (df.Title != 'Miss') & (df.Title != 'Mrs')] = 'Others'
df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()<count_values>
gc.enable()
del credit_MONTHS_BALANCE_count, credit_MONTHS_BALANCE_mean
gc.collect()
Home Credit Default Risk
1,457,238
df['Title'].value_counts()<categorify>
credit_AMT_BALANCE_mean = credit.groupby('SK_ID_PREV', as_index=False)['AMT_BALANCE'].mean().rename(columns = {'AMT_BALANCE': 'credit_AMT_BALANCE_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_AMT_BALANCE_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_AMT_BALANCE_mean
gc.collect()
Home Credit Default Risk
1,457,238
df = pd.concat([df, pd.get_dummies(df['Title'])], axis=1).drop(labels=['Name'], axis=1)<count_missing_values>
credit_AMT_CREDIT_LIMIT_ACTUAL_mean = credit.groupby('SK_ID_PREV', as_index=False)['AMT_CREDIT_LIMIT_ACTUAL'].mean().rename(columns = {'AMT_CREDIT_LIMIT_ACTUAL': 'credit_AMT_CREDIT_LIMIT_ACTUAL_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_AMT_CREDIT_LIMIT_ACTUAL_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_AMT_CREDIT_LIMIT_ACTUAL_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.Sex.isnull().sum(axis=0)<groupby>
credit_AMT_DRAWINGS_ATM_CURRENT_mean = credit.groupby('SK_ID_PREV', as_index=False)['AMT_DRAWINGS_ATM_CURRENT'].mean().rename(columns = {'AMT_DRAWINGS_ATM_CURRENT': 'credit_AMT_DRAWINGS_ATM_CURRENT_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_AMT_DRAWINGS_ATM_CURRENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_AMT_DRAWINGS_ATM_CURRENT_mean
gc.collect()
Home Credit Default Risk
1,457,238
df[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean()<categorify>
credit_AMT_DRAWINGS_CURRENT_mean = credit.groupby('SK_ID_PREV', as_index=False)['AMT_DRAWINGS_CURRENT'].mean().rename(columns = {'AMT_DRAWINGS_CURRENT': 'credit_AMT_DRAWINGS_CURRENT_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_AMT_DRAWINGS_CURRENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_AMT_DRAWINGS_CURRENT_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.Sex = df.Sex.map({'male': 0, 'female': 1})<count_missing_values>
credit_AMT_DRAWINGS_OTHER_CURRENT_mean = credit.groupby('SK_ID_PREV', as_index=False)['AMT_DRAWINGS_OTHER_CURRENT'].mean().rename(columns = {'AMT_DRAWINGS_OTHER_CURRENT': 'credit_AMT_DRAWINGS_OTHER_CURRENT_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_AMT_DRAWINGS_OTHER_CURRENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_AMT_DRAWINGS_OTHER_CURRENT_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.Age.isnull().sum(axis=0)<count_missing_values>
credit_AMT_DRAWINGS_POS_CURRENT_mean = credit.groupby('SK_ID_PREV', as_index=False)['AMT_DRAWINGS_POS_CURRENT'].mean().rename(columns = {'AMT_DRAWINGS_POS_CURRENT': 'credit_AMT_DRAWINGS_POS_CURRENT_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_AMT_DRAWINGS_POS_CURRENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_AMT_DRAWINGS_POS_CURRENT_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.SibSp.isnull().sum(axis=0), df.Parch.isnull().sum(axis=0)<feature_engineering>
credit_AMT_INST_MIN_REGULARITY_mean = credit.groupby('SK_ID_PREV', as_index=False)['AMT_INST_MIN_REGULARITY'].mean().rename(columns = {'AMT_INST_MIN_REGULARITY': 'credit_AMT_INST_MIN_REGULARITY_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_AMT_INST_MIN_REGULARITY_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_AMT_INST_MIN_REGULARITY_mean
gc.collect()
Home Credit Default Risk
1,457,238
df['Family'] = df['SibSp'] + df['Parch'] + 1
df[['Family', 'Survived']].groupby(['Family'], as_index=False).mean()<count_values>
credit_AMT_PAYMENT_CURRENT_mean = credit.groupby('SK_ID_PREV', as_index=False)['AMT_PAYMENT_CURRENT'].mean().rename(columns = {'AMT_PAYMENT_CURRENT': 'credit_AMT_PAYMENT_CURRENT_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_AMT_PAYMENT_CURRENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_AMT_PAYMENT_CURRENT_mean
gc.collect()
Home Credit Default Risk
1,457,238
df['Family'].value_counts()<groupby>
credit_AMT_PAYMENT_TOTAL_CURRENT_mean = credit.groupby('SK_ID_PREV', as_index=False)['AMT_PAYMENT_TOTAL_CURRENT'].mean().rename(columns = {'AMT_PAYMENT_TOTAL_CURRENT': 'credit_AMT_PAYMENT_TOTAL_CURRENT_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_AMT_PAYMENT_TOTAL_CURRENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_AMT_PAYMENT_TOTAL_CURRENT_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.Family = df.Family.map(lambda x: 0 if x > 4 else x)
df[['Family', 'Survived']].groupby(['Family'], as_index=False).mean()<count_values>
credit_AMT_RECEIVABLE_PRINCIPAL_mean = credit.groupby('SK_ID_PREV', as_index=False)['AMT_RECEIVABLE_PRINCIPAL'].mean().rename(columns = {'AMT_RECEIVABLE_PRINCIPAL': 'credit_AMT_RECEIVABLE_PRINCIPAL_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_AMT_RECEIVABLE_PRINCIPAL_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_AMT_RECEIVABLE_PRINCIPAL_mean
gc.collect()
Home Credit Default Risk
1,457,238
df['Family'].value_counts()<count_missing_values>
credit_AMT_RECIVABLE_mean = credit.groupby('SK_ID_PREV', as_index=False)['AMT_RECIVABLE'].mean().rename(columns = {'AMT_RECIVABLE': 'credit_AMT_RECIVABLE_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_AMT_RECIVABLE_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_AMT_RECIVABLE_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.Ticket.isnull().sum(axis=0)<groupby>
credit_AMT_TOTAL_RECEIVABLE_mean = credit.groupby('SK_ID_PREV', as_index=False)['AMT_TOTAL_RECEIVABLE'].mean().rename(columns = {'AMT_TOTAL_RECEIVABLE': 'credit_AMT_TOTAL_RECEIVABLE_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_AMT_TOTAL_RECEIVABLE_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_AMT_TOTAL_RECEIVABLE_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.Ticket = df.Ticket.map(lambda x: x[0])
df[['Ticket', 'Survived']].groupby(['Ticket'], as_index=False).mean()<count_values>
credit_CNT_DRAWINGS_ATM_CURRENT_mean = credit.groupby('SK_ID_PREV', as_index=False)['CNT_DRAWINGS_ATM_CURRENT'].mean().rename(columns = {'CNT_DRAWINGS_ATM_CURRENT': 'credit_CNT_DRAWINGS_ATM_CURRENT_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_CNT_DRAWINGS_ATM_CURRENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_CNT_DRAWINGS_ATM_CURRENT_mean
gc.collect()
Home Credit Default Risk
1,457,238
df['Ticket'].value_counts()<groupby>
credit_CNT_DRAWINGS_CURRENT_mean = credit.groupby('SK_ID_PREV', as_index=False)['CNT_DRAWINGS_CURRENT'].mean().rename(columns = {'CNT_DRAWINGS_CURRENT': 'credit_CNT_DRAWINGS_CURRENT_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_CNT_DRAWINGS_CURRENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_CNT_DRAWINGS_CURRENT_mean
gc.collect()
Home Credit Default Risk
1,457,238
df[['Ticket', 'Fare']].groupby(['Ticket'], as_index=False).mean()<groupby>
credit_CNT_DRAWINGS_OTHER_CURRENT_mean = credit.groupby('SK_ID_PREV', as_index=False)['CNT_DRAWINGS_OTHER_CURRENT'].mean().rename(columns = {'CNT_DRAWINGS_OTHER_CURRENT': 'credit_CNT_DRAWINGS_OTHER_CURRENT_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_CNT_DRAWINGS_OTHER_CURRENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_CNT_DRAWINGS_OTHER_CURRENT_mean
gc.collect()
Home Credit Default Risk
1,457,238
df[['Ticket', 'Pclass']].groupby(['Ticket'], as_index=False).mean()<count_missing_values>
credit_CNT_DRAWINGS_POS_CURRENT_mean = credit.groupby('SK_ID_PREV', as_index=False)['CNT_DRAWINGS_POS_CURRENT'].mean().rename(columns = {'CNT_DRAWINGS_POS_CURRENT': 'credit_CNT_DRAWINGS_POS_CURRENT_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_CNT_DRAWINGS_POS_CURRENT_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_CNT_DRAWINGS_POS_CURRENT_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.Fare.isnull().sum(axis=0)<filter>
credit_CNT_INSTALMENT_MATURE_CUM_mean = credit.groupby('SK_ID_PREV', as_index=False)['CNT_INSTALMENT_MATURE_CUM'].mean().rename(columns = {'CNT_INSTALMENT_MATURE_CUM': 'credit_CNT_INSTALMENT_MATURE_CUM_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_CNT_INSTALMENT_MATURE_CUM_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_CNT_INSTALMENT_MATURE_CUM_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.Ticket[df.Fare.isnull()]<filter>
credit_SK_DPD_mean = credit.groupby('SK_ID_PREV', as_index=False)['SK_DPD'].mean().rename(columns = {'SK_DPD': 'credit_SK_DPD_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_SK_DPD_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_SK_DPD_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.Pclass[df.Fare.isnull()]<filter>
credit_SK_DPD_DEF_mean = credit.groupby('SK_ID_PREV', as_index=False)['SK_DPD_DEF'].mean().rename(columns = {'SK_DPD_DEF': 'credit_SK_DPD_DEF_mean'})
credit_stats_by_prev = credit_stats_by_prev.merge(credit_SK_DPD_DEF_mean, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_SK_DPD_DEF_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.Cabin[df.Fare.isnull()]<filter>
credit_cats = pd.get_dummies(credit.select_dtypes('object'))
credit_cats['SK_ID_PREV'] = credit['SK_ID_PREV']
credit_cats.head()
Home Credit Default Risk
1,457,238
df.Embarked[df.Fare.isnull()]<groupby>
credit_cats_grouped = credit_cats.groupby('SK_ID_PREV').agg('sum')
credit_cats_grouped.head()
Home Credit Default Risk
1,457,238
df[['Pclass', 'Fare']].groupby(['Pclass']).mean()<groupby>
credit_stats_by_prev = credit_stats_by_prev.merge(credit_cats_grouped, on = 'SK_ID_PREV', how = 'left')
gc.enable()
del credit_cats_grouped, credit_cats
gc.collect()
Home Credit Default Risk
1,457,238
df[['Ticket', 'Fare']].groupby(['Ticket']).mean()<groupby>
credit_MONTHS_BALANCE_count_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_MONTHS_BALANCE_count'].mean().rename(columns = {'credit_MONTHS_BALANCE_count': 'credit_MONTHS_BALANCE_count_mean'})
dataset = dataset.merge(credit_MONTHS_BALANCE_count_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_MONTHS_BALANCE_count_mean
gc.collect()
Home Credit Default Risk
1,457,238
df[['Embarked', 'Fare']].groupby(['Embarked']).mean()<groupby>
credit_MONTHS_BALANCE_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_MONTHS_BALANCE_mean'].mean().rename(columns = {'credit_MONTHS_BALANCE_mean': 'credit_MONTHS_BALANCE_mean_mean'})
dataset = dataset.merge(credit_MONTHS_BALANCE_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_MONTHS_BALANCE_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
guess_Fare = df.Fare.loc[(df.Ticket == '3') & (df.Pclass == 3) & (df.Embarked == 'S')].median()
df.Fare.fillna(guess_Fare, inplace=True)
df[['Fare', 'Survived']].groupby(['Survived'], as_index=False).mean()<data_type_conversions>
credit_AMT_BALANCE_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_AMT_BALANCE_mean'].mean().rename(columns = {'credit_AMT_BALANCE_mean': 'credit_AMT_BALANCE_mean_mean'})
dataset = dataset.merge(credit_AMT_BALANCE_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_AMT_BALANCE_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df['Fare-bin'] = pd.qcut(df.Fare, 5, labels=[1, 2, 3, 4, 5]).astype(int)
df[['Fare-bin', 'Survived']].groupby(['Fare-bin'], as_index=False).mean()<count_missing_values>
credit_AMT_CREDIT_LIMIT_ACTUAL_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_AMT_CREDIT_LIMIT_ACTUAL_mean'].mean().rename(columns = {'credit_AMT_CREDIT_LIMIT_ACTUAL_mean': 'credit_AMT_CREDIT_LIMIT_ACTUAL_mean_mean'})
dataset = dataset.merge(credit_AMT_CREDIT_LIMIT_ACTUAL_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_AMT_CREDIT_LIMIT_ACTUAL_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.Cabin.isnull().sum(axis=0)<drop_column>
credit_AMT_DRAWINGS_ATM_CURRENT_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_AMT_DRAWINGS_ATM_CURRENT_mean'].mean().rename(columns = {'credit_AMT_DRAWINGS_ATM_CURRENT_mean': 'credit_AMT_DRAWINGS_ATM_CURRENT_mean_mean'})
dataset = dataset.merge(credit_AMT_DRAWINGS_ATM_CURRENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_AMT_DRAWINGS_ATM_CURRENT_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df = df.drop(labels=['Cabin'], axis=1)<count_missing_values>
credit_AMT_DRAWINGS_CURRENT_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_AMT_DRAWINGS_CURRENT_mean'].mean().rename(columns = {'credit_AMT_DRAWINGS_CURRENT_mean': 'credit_AMT_DRAWINGS_CURRENT_mean_mean'})
dataset = dataset.merge(credit_AMT_DRAWINGS_CURRENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_AMT_DRAWINGS_CURRENT_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.Embarked.isnull().sum(axis=0)<data_type_conversions>
credit_AMT_DRAWINGS_OTHER_CURRENT_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_AMT_DRAWINGS_OTHER_CURRENT_mean'].mean().rename(columns = {'credit_AMT_DRAWINGS_OTHER_CURRENT_mean': 'credit_AMT_DRAWINGS_OTHER_CURRENT_mean_mean'})
dataset = dataset.merge(credit_AMT_DRAWINGS_OTHER_CURRENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_AMT_DRAWINGS_OTHER_CURRENT_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df.Embarked.fillna('S', inplace=True)<groupby>
credit_AMT_DRAWINGS_POS_CURRENT_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_AMT_DRAWINGS_POS_CURRENT_mean'].mean().rename(columns = {'credit_AMT_DRAWINGS_POS_CURRENT_mean': 'credit_AMT_DRAWINGS_POS_CURRENT_mean_mean'})
dataset = dataset.merge(credit_AMT_DRAWINGS_POS_CURRENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_AMT_DRAWINGS_POS_CURRENT_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df[['Embarked', 'Survived', 'Pclass', 'Fare', 'Age', 'Sex']].groupby(['Embarked'], as_index=False).mean()<drop_column>
credit_AMT_INST_MIN_REGULARITY_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_AMT_INST_MIN_REGULARITY_mean'].mean().rename(columns = {'credit_AMT_INST_MIN_REGULARITY_mean': 'credit_AMT_INST_MIN_REGULARITY_mean_mean'})
dataset = dataset.merge(credit_AMT_INST_MIN_REGULARITY_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_AMT_INST_MIN_REGULARITY_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df = df.drop(labels='Embarked', axis=1)<groupby>
credit_AMT_PAYMENT_CURRENT_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_AMT_PAYMENT_CURRENT_mean'].mean().rename(columns = {'credit_AMT_PAYMENT_CURRENT_mean': 'credit_AMT_PAYMENT_CURRENT_mean_mean'})
dataset = dataset.merge(credit_AMT_PAYMENT_CURRENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_AMT_PAYMENT_CURRENT_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df[['Title', 'Age']].groupby(['Title']).mean()<groupby>
credit_AMT_PAYMENT_TOTAL_CURRENT_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_AMT_PAYMENT_TOTAL_CURRENT_mean'].mean().rename(columns = {'credit_AMT_PAYMENT_TOTAL_CURRENT_mean': 'credit_AMT_PAYMENT_TOTAL_CURRENT_mean_mean'})
dataset = dataset.merge(credit_AMT_PAYMENT_TOTAL_CURRENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_AMT_PAYMENT_TOTAL_CURRENT_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df[['Fare-bin', 'Age']].groupby(['Fare-bin']).mean()<groupby>
credit_AMT_RECEIVABLE_PRINCIPAL_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_AMT_RECEIVABLE_PRINCIPAL_mean'].mean().rename(columns = {'credit_AMT_RECEIVABLE_PRINCIPAL_mean': 'credit_AMT_RECEIVABLE_PRINCIPAL_mean_mean'})
dataset = dataset.merge(credit_AMT_RECEIVABLE_PRINCIPAL_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_AMT_RECEIVABLE_PRINCIPAL_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df[['SibSp', 'Age']].groupby(['SibSp']).mean()<groupby>
credit_AMT_RECIVABLE_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_AMT_RECIVABLE_mean'].mean().rename(columns = {'credit_AMT_RECIVABLE_mean': 'credit_AMT_RECIVABLE_mean_mean'})
dataset = dataset.merge(credit_AMT_RECIVABLE_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_AMT_RECIVABLE_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df[['Parch', 'Age']].groupby(['Parch']).mean()<prepare_x_and_y>
credit_AMT_TOTAL_RECEIVABLE_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_AMT_TOTAL_RECEIVABLE_mean'].mean().rename(columns = {'credit_AMT_TOTAL_RECEIVABLE_mean': 'credit_AMT_TOTAL_RECEIVABLE_mean_mean'})
dataset = dataset.merge(credit_AMT_TOTAL_RECEIVABLE_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_AMT_TOTAL_RECEIVABLE_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df_sub = df[['Age', 'Master', 'Miss', 'Mr', 'Mrs', 'Others', 'Fare-bin', 'SibSp']]
X_train = df_sub.dropna().drop('Age', axis=1)
y_train = df['Age'].dropna()
X_test = df_sub.loc[np.isnan(df.Age)].drop('Age', axis=1)
pca = PCA(n_components=2)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
regressor = RandomForestRegressor(n_estimators = 300)
regressor.fit(X_train, y_train)
y_pred = np.round(regressor.predict(X_test), 1)
df.Age.loc[df.Age.isnull()] = y_pred
df.Age.isnull().sum(axis=0)<categorify>
credit_CNT_DRAWINGS_ATM_CURRENT_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_CNT_DRAWINGS_ATM_CURRENT_mean'].mean().rename(columns = {'credit_CNT_DRAWINGS_ATM_CURRENT_mean': 'credit_CNT_DRAWINGS_ATM_CURRENT_mean_mean'})
dataset = dataset.merge(credit_CNT_DRAWINGS_ATM_CURRENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_CNT_DRAWINGS_ATM_CURRENT_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
bins = [0, 4, 12, 18, 30, 50, 65, 100]
age_index = (1, 2, 3, 4, 5, 6, 7)
df['Age-bin'] = pd.cut(df.Age, bins, labels=age_index).astype(int)
df[['Age-bin', 'Survived']].groupby(['Age-bin'], as_index=False).mean()<groupby>
credit_CNT_DRAWINGS_CURRENT_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_CNT_DRAWINGS_CURRENT_mean'].mean().rename(columns = {'credit_CNT_DRAWINGS_CURRENT_mean': 'credit_CNT_DRAWINGS_CURRENT_mean_mean'})
dataset = dataset.merge(credit_CNT_DRAWINGS_CURRENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_CNT_DRAWINGS_CURRENT_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df[['Ticket', 'Survived']].groupby(['Ticket'], as_index=False).mean()<count_values>
credit_CNT_DRAWINGS_OTHER_CURRENT_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_CNT_DRAWINGS_OTHER_CURRENT_mean'].mean().rename(columns = {'credit_CNT_DRAWINGS_OTHER_CURRENT_mean': 'credit_CNT_DRAWINGS_OTHER_CURRENT_mean_mean'})
dataset = dataset.merge(credit_CNT_DRAWINGS_OTHER_CURRENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_CNT_DRAWINGS_OTHER_CURRENT_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df['Ticket'].value_counts()<groupby>
credit_CNT_DRAWINGS_POS_CURRENT_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_CNT_DRAWINGS_POS_CURRENT_mean'].mean().rename(columns = {'credit_CNT_DRAWINGS_POS_CURRENT_mean': 'credit_CNT_DRAWINGS_POS_CURRENT_mean_mean'})
dataset = dataset.merge(credit_CNT_DRAWINGS_POS_CURRENT_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_CNT_DRAWINGS_POS_CURRENT_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df['Ticket'] = df['Ticket'].replace(['A', 'W', 'F', 'L', '5', '6', '7', '8', '9'], '4')
df[['Ticket', 'Survived']].groupby(['Ticket'], as_index=False).mean()<categorify>
credit_CNT_INSTALMENT_MATURE_CUM_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_CNT_INSTALMENT_MATURE_CUM_mean'].mean().rename(columns = {'credit_CNT_INSTALMENT_MATURE_CUM_mean': 'credit_CNT_INSTALMENT_MATURE_CUM_mean_mean'})
dataset = dataset.merge(credit_CNT_INSTALMENT_MATURE_CUM_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_CNT_INSTALMENT_MATURE_CUM_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df = pd.get_dummies(df, columns=['Ticket'])<prepare_x_and_y>
credit_SK_DPD_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_SK_DPD_mean'].mean().rename(columns = {'credit_SK_DPD_mean': 'credit_SK_DPD_mean_mean'})
dataset = dataset.merge(credit_SK_DPD_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_SK_DPD_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
df = df.drop(labels=['SibSp', 'Parch', 'Age', 'Fare', 'Title'], axis=1)
y_train = df[0:891]['Survived'].values
X_train = df[0:891].drop(['Survived', 'PassengerId'], axis=1).values
X_test = df[891:].drop(['Survived', 'PassengerId'], axis=1).values<train_model>
credit_SK_DPD_DEF_mean_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['credit_SK_DPD_DEF_mean'].mean().rename(columns = {'credit_SK_DPD_DEF_mean': 'credit_SK_DPD_DEF_mean_mean'})
dataset = dataset.merge(credit_SK_DPD_DEF_mean_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_SK_DPD_DEF_mean_mean
gc.collect()
Home Credit Default Risk
1,457,238
model = Sequential()
model.add(Dense(units = 9, kernel_initializer = 'uniform', activation = 'relu', input_dim = 17))
model.add(Dense(units = 9, kernel_initializer = 'uniform', activation = 'relu'))
model.add(Dense(units = 5, kernel_initializer = 'uniform', activation = 'relu'))
model.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
model.fit(X_train, y_train, batch_size = 32, epochs = 200)<save_to_csv>
credit_NAME_CONTRACT_STATUS_Active_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Active'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Active': 'credit_NAME_CONTRACT_STATUS_Active_mean'})
dataset = dataset.merge(credit_NAME_CONTRACT_STATUS_Active_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_NAME_CONTRACT_STATUS_Active_mean
gc.collect()
Home Credit Default Risk
1,457,238
y_pred = model.predict(X_test)
y_final = (y_pred > 0.5).astype(int).reshape(X_test.shape[0])
output = pd.DataFrame({'PassengerId': df_test['PassengerId'], 'Survived': y_final})
output.to_csv('prediction-ann.csv', index=False)<set_options>
credit_NAME_CONTRACT_STATUS_Approved_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Approved'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Approved': 'credit_NAME_CONTRACT_STATUS_Approved_mean'})
dataset = dataset.merge(credit_NAME_CONTRACT_STATUS_Approved_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_NAME_CONTRACT_STATUS_Approved_mean
gc.collect()
Home Credit Default Risk
1,457,238
plt.style.use('seaborn')
sns.set(font_scale=2.5)
warnings.filterwarnings('ignore')
%matplotlib inline<load_from_csv>
credit_NAME_CONTRACT_STATUS_Completed_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Completed'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Completed': 'credit_NAME_CONTRACT_STATUS_Completed_mean'})
dataset = dataset.merge(credit_NAME_CONTRACT_STATUS_Completed_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_NAME_CONTRACT_STATUS_Completed_mean
gc.collect()
Home Credit Default Risk
1,457,238
train_data = pd.read_csv('../input/train.csv')
train_data.head()<load_from_csv>
credit_NAME_CONTRACT_STATUS_Demand_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Demand'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Demand': 'credit_NAME_CONTRACT_STATUS_Demand_mean'})
dataset = dataset.merge(credit_NAME_CONTRACT_STATUS_Demand_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_NAME_CONTRACT_STATUS_Demand_mean
gc.collect()
Home Credit Default Risk
1,457,238
test_data = pd.read_csv('../input/test.csv')
test_data.head()<count_missing_values>
credit_NAME_CONTRACT_STATUS_Refused_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Refused'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Refused': 'credit_NAME_CONTRACT_STATUS_Refused_mean'})
dataset = dataset.merge(credit_NAME_CONTRACT_STATUS_Refused_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_NAME_CONTRACT_STATUS_Refused_mean
gc.collect()
Home Credit Default Risk
1,457,238
train_data.isnull().sum()<count_missing_values>
credit_NAME_CONTRACT_STATUS_Sent_mean = credit_stats_by_prev.groupby('SK_ID_CURR', as_index=False)['NAME_CONTRACT_STATUS_Sent proposal'].mean().rename(columns = {'NAME_CONTRACT_STATUS_Sent proposal': 'credit_NAME_CONTRACT_STATUS_Sent_mean'})
dataset = dataset.merge(credit_NAME_CONTRACT_STATUS_Sent_mean, on = 'SK_ID_CURR', how = 'left')
gc.enable()
del credit_NAME_CONTRACT_STATUS_Sent_mean
gc.collect()
Home Credit Default Risk