| kernel_id | prompt | completion | comp_name |
|---|---|---|---|
17,910,828 | train_data = df_train.drop(['id','keyword','location'], axis=1)
train_data.to_csv('cleaned_train.csv', index=False)
test_data = df_test.drop(['keyword','location'], axis=1)
test_data.to_csv('cleaned_test.csv', index=False)<load_from_csv> | preds = clf.predict_proba(apps_all_test.drop('SK_ID_CURR', axis=1))[:, 1]
apps_all_test['TARGET'] = preds
apps_all_test[['SK_ID_CURR', 'TARGET']].to_csv('prev_baseline_03.csv', index=False) | Home Credit Default Risk |
17,544,955 | train_data = pd.read_csv('cleaned_train.csv')
len(train_data)
<set_options> | application_train = pd.read_csv('/kaggle/input/home-credit-default-risk/application_train.csv')
application_test = pd.read_csv('/kaggle/input/home-credit-default-risk/application_test.csv')
| Home Credit Default Risk |
17,544,955 | SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True<load_pretrained> | print("Dimension of application_train :", application_train.shape)
print("Number of columns with missing values :", (application_train.isnull().sum() != 0).sum())
application_train.head() | Home Credit Default Risk |
17,544,955 | tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')<string_transform> | print("Dimension :", application_train.dropna(axis=0).shape)
print("Number of columns with missing values :", (application_train.dropna(axis=0).isnull().sum() != 0).sum())
application_train.dropna(axis=0) | Home Credit Default Risk |
17,544,955 | init_token = tokenizer.cls_token
eos_token = tokenizer.sep_token
pad_token = tokenizer.pad_token
unk_token = tokenizer.unk_token<categorify> | column_list = []
for name in column_series.keys():
    if column_series[name] > 100000:
        column_list.append(name)
print(column_list, len(column_list)) | Home Credit Default Risk |
17,544,955 | init_token_idx = tokenizer.convert_tokens_to_ids(init_token)
eos_token_idx = tokenizer.convert_tokens_to_ids(eos_token)
pad_token_idx = tokenizer.convert_tokens_to_ids(pad_token)
unk_token_idx = tokenizer.convert_tokens_to_ids(unk_token)
print(init_token_idx, eos_token_idx, pad_token_idx, unk_token_idx)<define_variables> | def show_hist_by_target(df, columns):
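    # Categorical columns: bar chart of per-target category ratios plus value-count line plots;
    # numeric columns: violin plot by TARGET plus per-target histograms with KDE.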
    cond_1 = (df['TARGET'] == 1)
    cond_0 = (df['TARGET'] == 0)
    for column in columns:
        fig, ax = plt.subplots(figsize=(12, 4), nrows=1, ncols=2, squeeze=False)
        if isinstance(df[column][0], str):
            df_temp = df[["TARGET", column]].value_counts().astype(float)
            idx_temp = df_temp.reset_index(name='RATIO')[column].unique()
            for i in range(0, 2):
                sum_temp = df_temp[i].sum()
                for j in idx_temp:
                    df_temp[(i, j)] = df_temp[(i, j)] / sum_temp
            df_temp = df_temp.reset_index(name='RATIO')
            sns.barplot(x="TARGET", y="RATIO", hue=column, data=df_temp, ax=ax[0][0])
            sns.lineplot(x=df[cond_1][column].value_counts().keys().tolist(), y=df[cond_1][column].value_counts(), label='target=1', color='red', ax=ax[0][1])
            sns.lineplot(x=df[cond_0][column].value_counts().keys().tolist(), y=df[cond_0][column].value_counts(), label='target=0', color='blue', ax=ax[0][1])
else:
sns.violinplot(x='TARGET', y=column, data=df, ax=ax[0][0])
sns.histplot(df[cond_1][column], label='target=1', color='red', ax=ax[0][1], kde=True)
sns.histplot(df[cond_0][column], label='target=0', color='blue', ax=ax[0][1], kde=True)
plt.show()
plt.close() | Home Credit Default Risk |
17,544,955 | max_input_length = tokenizer.max_model_input_sizes['bert-base-uncased']
print(max_input_length)<string_transform> | abs(cor["TARGET"]).sort_values() | Home Credit Default Risk |
17,544,955 | def tokenize_and_cut(sentence):
tokens = tokenizer.tokenize(sentence)
tokens = tokens[:max_input_length-2]
return tokens<data_type_conversions> | application_train.dtypes.value_counts() | Home Credit Default Risk |
17,544,955 | TEXT = data.Field(batch_first = True,
use_vocab = False,
tokenize = tokenize_and_cut,
preprocessing = tokenizer.convert_tokens_to_ids,
init_token = init_token_idx,
eos_token = eos_token_idx,
pad_token = pad_token_idx,
unk_token = unk_token_idx)
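# The label field is numeric (float) so it can be fed straight to nn.BCEWithLogitsLoss.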
LABEL = data.LabelField(dtype = torch.float)<split> | application_train["FONDKAPREMONT_MODE"] | Home Credit Default Risk |
17,544,955 | fields = [('text', TEXT),('target', LABEL)]
datasets = torchtext.legacy.data.TabularDataset(
path='cleaned_train.csv',format='csv',skip_header=True,fields=fields)
train_data, test_data = datasets.split(split_ratio=[0.95, 0.05])
train_data, valid_data = train_data.split(random_state = random.seed(SEED))<feature_engineering> | le = LabelEncoder()
le_count = 0
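# Fit the encoder on the train column, then apply the same mapping to train and test so integer codes stay consistent.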
for col in application_train:
if application_train[col].dtype == 'object':
if len(list(application_train[col].unique())) >= 2:
le.fit(application_train[col])
application_train[col] = le.transform(application_train[col])
application_test[col] = le.transform(application_test[col])
le_count += 1
print('Label Encoding : %d columns label-encoded.' % le_count) | Home Credit Default Risk |
17,544,955 | LABEL.build_vocab(train_data)<split> | application_train["FONDKAPREMONT_MODE"] | Home Credit Default Risk |
17,544,955 | BATCH_SIZE = 128
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE,
    device = device)<split> | application_train.select_dtypes('object').apply(pd.Series.nunique, axis = 0) | Home Credit Default Risk |
17,544,955 | train_data, valid_data = train_data.split(
split_ratio=[0.85, 0.15],
random_state=random.seed(123))
print('Num Train: {}'.format(len(train_data)))
print('Num Validation: {}'.format(len(valid_data)))<load_pretrained> | rel_list = []
for rel_column in rel.index:
    if rel[rel_column] < 0.03:
        rel_list.append(rel_column)
        print(rel_column) | Home Credit Default Risk |
17,544,955 | bert = BertModel.from_pretrained('bert-base-uncased')<import_modules> | rel_list.remove('SK_ID_CURR') | Home Credit Default Risk |
17,544,955 | class BERTGRUDisaster(nn.Module):
def __init__(self,
bert,
hidden_dim,
output_dim,
n_layers,
bidirectional,
dropout):
super().__init__()
self.bert = bert
        embedding_dim = bert.config.to_dict()['hidden_size']
self.rnn = nn.GRU(embedding_dim,
hidden_dim,
num_layers = n_layers,
bidirectional = bidirectional,
batch_first = True,
dropout = 0 if n_layers < 2 else dropout)
self.out = nn.Linear(hidden_dim * 2 if bidirectional else hidden_dim, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, text):
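        # BERT runs under no_grad as a frozen feature extractor; only the GRU and linear head receive gradients.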
        with torch.no_grad():
embedded = self.bert(text)[0]
_, hidden = self.rnn(embedded)
if self.rnn.bidirectional:
            hidden = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1))
else:
hidden = self.dropout(hidden[-1,:,:])
output = self.out(hidden)
return output<choose_model_class> | column_list.remove("EXT_SOURCE_1")
app_train = application_train | Home Credit Default Risk |
17,544,955 | HIDDEN_DIM = 256
OUTPUT_DIM = 1
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.25
model = BERTGRUDisaster(bert,
HIDDEN_DIM,
OUTPUT_DIM,
N_LAYERS,
BIDIRECTIONAL,
                        DROPOUT)<find_best_params> | print("Dimension of application_test :", application_test.shape)
print("결측치가 있는 컬럼 수 :",(application_test.isnull().sum() !=0 ).sum())
application_test.head() | Home Credit Default Risk |
17,544,955 | for name, param in model.named_parameters():
    if name.startswith('bert'):
        param.requires_grad = False<choose_model_class> | app_test = application_test | Home Credit Default Risk |
17,544,955 | optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()<find_best_params> | def data_processing(out, data):
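    # Hand-crafted ratio features: EXT_SOURCE statistics plus credit/annuity/goods/income/age ratios.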
out['APPS_EXT_SOURCE_MEAN'] = data[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
out['APPS_EXT_SOURCE_STD'] = data[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
out['APPS_EXT_SOURCE_STD'] = out['APPS_EXT_SOURCE_STD'].fillna(out['APPS_EXT_SOURCE_STD'].mean())
out['APPS_ANNUITY_CREDIT_RATIO'] = data['AMT_ANNUITY']/data['AMT_CREDIT']
out['APPS_GOODS_CREDIT_RATIO'] = data['AMT_GOODS_PRICE']/data['AMT_CREDIT']
out['APPS_ANNUITY_INCOME_RATIO'] = data['AMT_ANNUITY']/data['AMT_INCOME_TOTAL']
out['APPS_GOODS_INCOME_RATIO'] = data['AMT_GOODS_PRICE']/data['AMT_INCOME_TOTAL']
out['APPS_CREDIT_INCOME_RATIO'] = data['AMT_CREDIT']/data['AMT_INCOME_TOTAL']
out['APPS_CNT_FAM_INCOME_RATIO'] = data['AMT_INCOME_TOTAL']/data['CNT_FAM_MEMBERS']
out['APPS_EMPLOYED_BIRTH_RATIO'] = data['DAYS_EMPLOYED']/data['DAYS_BIRTH']
out['APPS_INCOME_EMPLOYED_RATIO'] = data['AMT_INCOME_TOTAL']/data['DAYS_EMPLOYED']
out['APPS_INCOME_BIRTH_RATIO'] = data['AMT_INCOME_TOTAL']/data['DAYS_BIRTH']
out['APPS_CAR_BIRTH_RATIO'] = data['OWN_CAR_AGE'] / data['DAYS_BIRTH']
out['APPS_CAR_EMPLOYED_RATIO'] = data['OWN_CAR_AGE'] / data['DAYS_EMPLOYED']
return out | Home Credit Default Risk |
17,544,955 | model = model.to(device)
criterion = criterion.to(device)<compute_test_metric> | app_train = data_processing(app_train, application_train)
app_test = data_processing(app_test, application_test)
app_train.shape, app_test.shape | Home Credit Default Risk |
17,544,955 | def binary_accuracy(preds, y):
rounded_preds = torch.round(torch.sigmoid(preds))
    correct = (rounded_preds == y).float()
    acc = correct.sum() / len(correct)
    return acc<train_on_grid> | prev_app = pd.read_csv('../input/home-credit-default-risk/previous_application.csv')
print(prev_app.shape, app_train.shape) | Home Credit Default Risk |
17,544,955 | def train(model, iterator, optimizer, criterion):
epoch_loss = 0
epoch_acc = 0
model.train()
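    # One epoch: zero grads, forward pass, BCE-with-logits loss, backprop, optimizer step; accumulate loss and accuracy.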
for batch in iterator:
optimizer.zero_grad()
        predictions = model(batch.text).squeeze(1)
loss = criterion(predictions, batch.target)
acc = binary_accuracy(predictions, batch.target)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)<train_model> | prev_app['PREV_CREDIT_DIFF'] = prev_app['AMT_APPLICATION'] - prev_app['AMT_CREDIT']
prev_app['PREV_GOODS_DIFF'] = prev_app['AMT_APPLICATION'] - prev_app['AMT_GOODS_PRICE']
prev_app['PREV_CREDIT_APPL_RATIO'] = prev_app['AMT_CREDIT']/prev_app['AMT_APPLICATION']
prev_app['PREV_ANNUITY_APPL_RATIO'] = prev_app['AMT_ANNUITY']/prev_app['AMT_APPLICATION']
prev_app['PREV_GOODS_APPL_RATIO'] = prev_app['AMT_GOODS_PRICE']/prev_app['AMT_APPLICATION'] | Home Credit Default Risk |
17,544,955 | def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
    return elapsed_mins, elapsed_secs<compute_test_metric> | prev_app['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace=True)
prev_app['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace=True)
prev_app['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace=True)
prev_app['DAYS_LAST_DUE'].replace(365243, np.nan, inplace=True)
prev_app['DAYS_TERMINATION'].replace(365243, np.nan, inplace=True)
prev_app['PREV_DAYS_LAST_DUE_DIFF'] = prev_app['DAYS_LAST_DUE_1ST_VERSION'] - prev_app['DAYS_LAST_DUE'] | Home Credit Default Risk |
17,544,955 | def binary_accuracy(preds, y):
rounded_preds = torch.round(torch.sigmoid(preds))
    correct = (rounded_preds == y).float()
acc = correct.sum() / len(correct)
return acc<train_model> | agg_dict = {
'SK_ID_CURR':['count'],
'AMT_CREDIT':['mean', 'max', 'sum'],
'AMT_ANNUITY':['mean', 'max', 'sum'],
'AMT_APPLICATION':['mean', 'max', 'sum'],
'AMT_DOWN_PAYMENT':['mean', 'max', 'sum'],
'AMT_GOODS_PRICE':['mean', 'max', 'sum'],
'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
'PREV_CREDIT_DIFF':['mean', 'max', 'sum'],
'PREV_CREDIT_APPL_RATIO':['mean', 'max'],
'PREV_GOODS_DIFF':['mean', 'max', 'sum'],
'PREV_GOODS_APPL_RATIO':['mean', 'max'],
'PREV_DAYS_LAST_DUE_DIFF':['mean', 'max', 'sum'],
'PREV_INTERESTS_RATE':['mean', 'max']
}
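# Aggregate previous applications per current client (SK_ID_CURR) using the stats above.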
prev_group = prev_app.groupby('SK_ID_CURR')
prev_amt_agg = prev_group.agg(agg_dict)
prev_amt_agg.columns = ['PREV_' + '_'.join(column).upper() for column in prev_amt_agg.columns.ravel()]
prev_amt_agg.head() | Home Credit Default Risk |
17,544,955 | N_EPOCHS = 5
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
<save_model> | prev_app_merge = app_train.merge(prev_amt_agg, on='SK_ID_CURR', how='left', indicator=True)
prev_app_merge = prev_app_merge.drop(columns=['_merge'])
prev_app_merge.shape | Home Credit Default Risk |
17,544,955 | torch.save(model.state_dict(), 'disaster-model.pt')<predict_on_test> | prev_app['NAME_CONTRACT_STATUS'].value_counts() | Home Credit Default Risk |
17,544,955 | def predict_disaster(model, tokenizer, sentence):
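    # Tokenize a single sentence, wrap it with the [CLS]/[SEP] ids, and return the sigmoid probability of the positive class.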
model.eval()
tokens = tokenizer.tokenize(sentence)
tokens = tokens[:max_input_length-2]
    indexed = [init_token_idx] + tokenizer.convert_tokens_to_ids(tokens) + [eos_token_idx]
    tensor = torch.LongTensor(indexed).to(device)
tensor = tensor.unsqueeze(0)
prediction = torch.sigmoid(model(tensor))
    return prediction.item()<predict_on_test> | cond_refused = (prev_app['NAME_CONTRACT_STATUS'] == 'Refused')
cond_approved = (prev_app['NAME_CONTRACT_STATUS'] == 'Approved')
prev_refused = prev_app[cond_refused]
prev_approved = prev_app[cond_approved]
prev_refused.shape, prev_approved.shape, prev_app.shape | Home Credit Default Risk |
17,544,955 | predict_disaster(model, tokenizer, "Our Deeds are the Reason of this<load_from_csv> | prev_refused = prev_refused.groupby('SK_ID_CURR')
prev_approved = prev_approved.groupby('SK_ID_CURR' ) | Home Credit Default Risk |
17,544,955 | test_data = pd.read_csv('cleaned_test.csv')
test_data.head(10)<count_missing_values> | prev_refused = prev_refused['NAME_CONTRACT_TYPE'].count()
prev_refused.name = "PRE_CONTRACT_REFUSED"
prev_approved = prev_approved['NAME_CONTRACT_TYPE'].count()
prev_approved.name = "PRE_CONTRACT_APPROVED" | Home Credit Default Risk |
17,544,955 | test_data = test_data.fillna('nan')
test_data.isna().sum()<categorify> | prev_app_merge = prev_app_merge.merge(prev_approved, on='SK_ID_CURR', how='left', indicator=False)
prev_app_merge = prev_app_merge.merge(prev_refused, on='SK_ID_CURR', how='left', indicator=False)
prev_app_merge['PRE_CONTRACT_APPROVED_RATE'] = prev_app_merge['PRE_CONTRACT_APPROVED'] / (prev_app_merge['PRE_CONTRACT_APPROVED'] + prev_app_merge['PRE_CONTRACT_REFUSED'])
prev_app_merge['PRE_CONTRACT_REFUSED_RATE'] = prev_app_merge['PRE_CONTRACT_REFUSED'] / (prev_app_merge['PRE_CONTRACT_APPROVED'] + prev_app_merge['PRE_CONTRACT_REFUSED'])
prev_app_merge.head() | Home Credit Default Risk |
17,544,955 | submission_dict = {'id' : [], 'target' : []}
for data in test_data.iterrows():
idx = data[1].id
text = data[1].text
target = predict_disaster(model, tokenizer, text)
target = 0 if target < 0.5 else 1
submission_dict['id'].append(idx)
submission_dict['target'].append(target)
<create_dataframe> | prev_app_merge = prev_app_merge.replace(float('NaN'), 0)
prev_app_merge.head() | Home Credit Default Risk |
17,544,955 | sample_df = pd.DataFrame(submission_dict)
sample_df<save_to_csv> | bureau = pd.read_csv('../input/home-credit-default-risk/bureau.csv')
print("Size of bureau data", bureau.shape) | Home Credit Default Risk |
17,544,955 | sample_df.to_csv('sample_submission_01.csv', index=False)<load_from_csv> | PAST_LOANS_PER_CUS = bureau[['SK_ID_CURR', 'DAYS_CREDIT']].groupby(by = ['SK_ID_CURR'])['DAYS_CREDIT'].count().reset_index().rename(index=str, columns={'DAYS_CREDIT': 'BUREAU_LOAN_COUNT'})
app_train_bureau = prev_app_merge.merge(PAST_LOANS_PER_CUS, on = ['SK_ID_CURR'], how = 'left')
print(app_train_bureau.shape)
app_train_bureau.head() | Home Credit Default Risk |
17,544,955 | x = pd.read_csv('sample_submission_01.csv')
x.head()
<set_options> | BUREAU_LOAN_TYPES = bureau[['SK_ID_CURR', 'CREDIT_TYPE']].groupby(by = ['SK_ID_CURR'])['CREDIT_TYPE'].nunique().reset_index().rename(index=str, columns={'CREDIT_TYPE': 'BUREAU_LOAN_TYPES'})
app_train_bureau = app_train_bureau.merge(BUREAU_LOAN_TYPES, on = ['SK_ID_CURR'], how = 'left').fillna(0)
print(app_train_bureau.shape)
app_train_bureau.head() | Home Credit Default Risk |
17,544,955 | warnings.filterwarnings('ignore')
<load_from_csv> | app_train_bureau['AVERAGE_LOAN_TYPE'] = app_train_bureau['BUREAU_LOAN_COUNT']/app_train_bureau['BUREAU_LOAN_TYPES']
app_train_bureau = app_train_bureau.fillna(0)
print(app_train_bureau.shape)
app_train_bureau.head() | Home Credit Default Risk |
17,544,955 | train = pd.read_csv('../input/nlp-getting-started/train.csv', usecols=['id','text','target'])
test = pd.read_csv('../input/nlp-getting-started/test.csv', usecols=['id','text'])
sample = pd.read_csv('../input/nlp-getting-started/sample_submission.csv')<categorify> | del app_train_bureau['BUREAU_LOAN_COUNT'], app_train_bureau['BUREAU_LOAN_TYPES']
app_train_bureau.head() | Home Credit Default Risk |
17,544,955 | %%time
def clean(tweet):
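    # Normalize mojibake artifacts, expand contractions, split concatenated hashtags and handles, and strip URLs.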
tweet = re.sub(r"\x89Û_", "", tweet)
tweet = re.sub(r"\x89ÛÒ", "", tweet)
tweet = re.sub(r"\x89ÛÓ", "", tweet)
tweet = re.sub(r"\x89ÛÏWhen", "When", tweet)
tweet = re.sub(r"\x89ÛÏ", "", tweet)
tweet = re.sub(r"China\x89Ûªs", "China's", tweet)
tweet = re.sub(r"let\x89Ûªs", "let's", tweet)
tweet = re.sub(r"\x89Û÷", "", tweet)
tweet = re.sub(r"\x89Ûª", "", tweet)
tweet = re.sub(r"\x89Û\x9d", "", tweet)
tweet = re.sub(r"å_", "", tweet)
tweet = re.sub(r"\x89Û¢", "", tweet)
tweet = re.sub(r"\x89Û¢åÊ", "", tweet)
tweet = re.sub(r"fromåÊwounds", "from wounds", tweet)
tweet = re.sub(r"åÊ", "", tweet)
tweet = re.sub(r"åÈ", "", tweet)
tweet = re.sub(r"JapÌ_n", "Japan", tweet)
tweet = re.sub(r"Ì©", "e", tweet)
tweet = re.sub(r"å¨", "", tweet)
tweet = re.sub(r"Surṳ", "Suruc", tweet)
tweet = re.sub(r"åÇ", "", tweet)
tweet = re.sub(r"å£3million", "3 million", tweet)
tweet = re.sub(r"åÀ", "", tweet)
tweet = re.sub(r"he's", "he is", tweet)
tweet = re.sub(r"there's", "there is", tweet)
tweet = re.sub(r"We're", "We are", tweet)
tweet = re.sub(r"That's", "That is", tweet)
tweet = re.sub(r"won't", "will not", tweet)
tweet = re.sub(r"they're", "they are", tweet)
tweet = re.sub(r"Can't", "Cannot", tweet)
tweet = re.sub(r"wasn't", "was not", tweet)
tweet = re.sub(r"don\x89Ûªt", "do not", tweet)
tweet = re.sub(r"aren't", "are not", tweet)
tweet = re.sub(r"isn't", "is not", tweet)
tweet = re.sub(r"What's", "What is", tweet)
tweet = re.sub(r"haven't", "have not", tweet)
tweet = re.sub(r"hasn't", "has not", tweet)
tweet = re.sub(r"There's", "There is", tweet)
tweet = re.sub(r"He's", "He is", tweet)
tweet = re.sub(r"It's", "It is", tweet)
tweet = re.sub(r"You're", "You are", tweet)
tweet = re.sub(r"I'M", "I am", tweet)
tweet = re.sub(r"shouldn't", "should not", tweet)
tweet = re.sub(r"wouldn't", "would not", tweet)
tweet = re.sub(r"i'm", "I am", tweet)
tweet = re.sub(r"I\x89Ûªm", "I am", tweet)
tweet = re.sub(r"I'm", "I am", tweet)
tweet = re.sub(r"Isn't", "is not", tweet)
tweet = re.sub(r"Here's", "Here is", tweet)
tweet = re.sub(r"you've", "you have", tweet)
tweet = re.sub(r"you\x89Ûªve", "you have", tweet)
tweet = re.sub(r"we're", "we are", tweet)
tweet = re.sub(r"what's", "what is", tweet)
tweet = re.sub(r"couldn't", "could not", tweet)
tweet = re.sub(r"we've", "we have", tweet)
tweet = re.sub(r"it\x89Ûªs", "it is", tweet)
tweet = re.sub(r"doesn\x89Ûªt", "does not", tweet)
tweet = re.sub(r"It\x89Ûªs", "It is", tweet)
tweet = re.sub(r"Here\x89Ûªs", "Here is", tweet)
tweet = re.sub(r"who's", "who is", tweet)
tweet = re.sub(r"I\x89Ûªve", "I have", tweet)
tweet = re.sub(r"y'all", "you all", tweet)
tweet = re.sub(r"can\x89Ûªt", "cannot", tweet)
tweet = re.sub(r"would've", "would have", tweet)
tweet = re.sub(r"it'll", "it will", tweet)
tweet = re.sub(r"we'll", "we will", tweet)
tweet = re.sub(r"wouldn\x89Ûªt", "would not", tweet)
tweet = re.sub(r"We've", "We have", tweet)
tweet = re.sub(r"he'll", "he will", tweet)
tweet = re.sub(r"Y'all", "You all", tweet)
tweet = re.sub(r"Weren't", "Were not", tweet)
tweet = re.sub(r"Didn't", "Did not", tweet)
tweet = re.sub(r"they'll", "they will", tweet)
tweet = re.sub(r"they'd", "they would", tweet)
tweet = re.sub(r"DON'T", "DO NOT", tweet)
tweet = re.sub(r"That\x89Ûªs", "That is", tweet)
tweet = re.sub(r"they've", "they have", tweet)
tweet = re.sub(r"i'd", "I would", tweet)
tweet = re.sub(r"should've", "should have", tweet)
tweet = re.sub(r"You\x89Ûªre", "You are", tweet)
tweet = re.sub(r"where's", "where is", tweet)
tweet = re.sub(r"Don\x89Ûªt", "Do not", tweet)
tweet = re.sub(r"we'd", "we would", tweet)
tweet = re.sub(r"i'll", "I will", tweet)
tweet = re.sub(r"weren't", "were not", tweet)
tweet = re.sub(r"They're", "They are", tweet)
tweet = re.sub(r"Can\x89Ûªt", "Cannot", tweet)
tweet = re.sub(r"you\x89Ûªll", "you will", tweet)
tweet = re.sub(r"I\x89Ûªd", "I would", tweet)
tweet = re.sub(r"let's", "let us", tweet)
tweet = re.sub(r"it's", "it is", tweet)
tweet = re.sub(r"can't", "cannot", tweet)
tweet = re.sub(r"don't", "do not", tweet)
tweet = re.sub(r"you're", "you are", tweet)
tweet = re.sub(r"i've", "I have", tweet)
tweet = re.sub(r"that's", "that is", tweet)
tweet = re.sub(r"i'll", "I will", tweet)
tweet = re.sub(r"doesn't", "does not", tweet)
tweet = re.sub(r"i'd", "I would", tweet)
tweet = re.sub(r"didn't", "did not", tweet)
tweet = re.sub(r"ain't", "am not", tweet)
tweet = re.sub(r"you'll", "you will", tweet)
tweet = re.sub(r"I've", "I have", tweet)
tweet = re.sub(r"Don't", "do not", tweet)
tweet = re.sub(r"I'll", "I will", tweet)
tweet = re.sub(r"I'd", "I would", tweet)
tweet = re.sub(r"Let's", "Let us", tweet)
tweet = re.sub(r"you'd", "You would", tweet)
tweet = re.sub(r"It's", "It is", tweet)
tweet = re.sub(r"Ain't", "am not", tweet)
tweet = re.sub(r"Haven't", "Have not", tweet)
tweet = re.sub(r"Could've", "Could have", tweet)
tweet = re.sub(r"youve", "you have", tweet)
tweet = re.sub(r"donå«t", "do not", tweet)
tweet = re.sub(r">", ">", tweet)
tweet = re.sub(r"<", "<", tweet)
tweet = re.sub(r"&", "&", tweet)
tweet = re.sub(r"w/e", "whatever", tweet)
tweet = re.sub(r"w/", "with", tweet)
tweet = re.sub(r"USAgov", "USA government", tweet)
tweet = re.sub(r"recentlu", "recently", tweet)
tweet = re.sub(r"Ph0tos", "Photos", tweet)
tweet = re.sub(r"amirite", "am I right", tweet)
tweet = re.sub(r"exp0sed", "exposed", tweet)
tweet = re.sub(r"<3", "love", tweet)
tweet = re.sub(r"amageddon", "armageddon", tweet)
tweet = re.sub(r"Trfc", "Traffic", tweet)
tweet = re.sub(r"8/5/2015", "2015-08-05", tweet)
tweet = re.sub(r"WindStorm", "Wind Storm", tweet)
tweet = re.sub(r"8/6/2015", "2015-08-06", tweet)
tweet = re.sub(r"10:38PM", "10:38 PM", tweet)
tweet = re.sub(r"10:30pm", "10:30 PM", tweet)
tweet = re.sub(r"16yr", "16 year", tweet)
tweet = re.sub(r"lmao", "laughing my ass off", tweet)
tweet = re.sub(r"TRAUMATISED", "traumatized", tweet)
tweet = re.sub(r"IranDeal", "Iran Deal", tweet)
tweet = re.sub(r"ArianaGrande", "Ariana Grande", tweet)
tweet = re.sub(r"camilacabello97", "camila cabello", tweet)
tweet = re.sub(r"RondaRousey", "Ronda Rousey", tweet)
tweet = re.sub(r"MTVHottest", "MTV Hottest", tweet)
tweet = re.sub(r"TrapMusic", "Trap Music", tweet)
tweet = re.sub(r"ProphetMuhammad", "Prophet Muhammad", tweet)
tweet = re.sub(r"PantherAttack", "Panther Attack", tweet)
tweet = re.sub(r"StrategicPatience", "Strategic Patience", tweet)
tweet = re.sub(r"socialnews", "social news", tweet)
tweet = re.sub(r"NASAHurricane", "NASA Hurricane", tweet)
tweet = re.sub(r"onlinecommunities", "online communities", tweet)
tweet = re.sub(r"humanconsumption", "human consumption", tweet)
tweet = re.sub(r"Typhoon-Devastated", "Typhoon Devastated", tweet)
tweet = re.sub(r"Meat-Loving", "Meat Loving", tweet)
tweet = re.sub(r"facialabuse", "facial abuse", tweet)
tweet = re.sub(r"LakeCounty", "Lake County", tweet)
tweet = re.sub(r"BeingAuthor", "Being Author", tweet)
tweet = re.sub(r"withheavenly", "with heavenly", tweet)
tweet = re.sub(r"thankU", "thank you", tweet)
tweet = re.sub(r"iTunesMusic", "iTunes Music", tweet)
tweet = re.sub(r"OffensiveContent", "Offensive Content", tweet)
tweet = re.sub(r"WorstSummerJob", "Worst Summer Job", tweet)
tweet = re.sub(r"HarryBeCareful", "Harry Be Careful", tweet)
tweet = re.sub(r"NASASolarSystem", "NASA Solar System", tweet)
tweet = re.sub(r"animalrescue", "animal rescue", tweet)
tweet = re.sub(r"KurtSchlichter", "Kurt Schlichter", tweet)
tweet = re.sub(r"aRmageddon", "armageddon", tweet)
tweet = re.sub(r"Throwingknifes", "Throwing knives", tweet)
tweet = re.sub(r"GodsLove", "God's Love", tweet)
tweet = re.sub(r"bookboost", "book boost", tweet)
tweet = re.sub(r"ibooklove", "I book love", tweet)
tweet = re.sub(r"NestleIndia", "Nestle India", tweet)
tweet = re.sub(r"realDonaldTrump", "Donald Trump", tweet)
tweet = re.sub(r"DavidVonderhaar", "David Vonderhaar", tweet)
tweet = re.sub(r"CecilTheLion", "Cecil The Lion", tweet)
tweet = re.sub(r"weathernetwork", "weather network", tweet)
tweet = re.sub(r"withBioterrorism&use", "with Bioterrorism & use", tweet)
tweet = re.sub(r"Hostage&2", "Hostage & 2", tweet)
tweet = re.sub(r"GOPDebate", "GOP Debate", tweet)
tweet = re.sub(r"RickPerry", "Rick Perry", tweet)
tweet = re.sub(r"frontpage", "front page", tweet)
tweet = re.sub(r"NewsInTweets", "News In Tweets", tweet)
tweet = re.sub(r"ViralSpell", "Viral Spell", tweet)
tweet = re.sub(r"til_now", "until now", tweet)
tweet = re.sub(r"volcanoinRussia", "volcano in Russia", tweet)
tweet = re.sub(r"ZippedNews", "Zipped News", tweet)
tweet = re.sub(r"MicheleBachman", "Michele Bachman", tweet)
tweet = re.sub(r"53inch", "53 inch", tweet)
tweet = re.sub(r"KerrickTrial", "Kerrick Trial", tweet)
tweet = re.sub(r"abstorm", "Alberta Storm", tweet)
tweet = re.sub(r"Beyhive", "Beyonce hive", tweet)
tweet = re.sub(r"IDFire", "Idaho Fire", tweet)
tweet = re.sub(r"DETECTADO", "Detected", tweet)
tweet = re.sub(r"RockyFire", "Rocky Fire", tweet)
tweet = re.sub(r"Listen/Buy", "Listen / Buy", tweet)
tweet = re.sub(r"NickCannon", "Nick Cannon", tweet)
tweet = re.sub(r"FaroeIslands", "Faroe Islands", tweet)
tweet = re.sub(r"yycstorm", "Calgary Storm", tweet)
tweet = re.sub(r"IDPs:", "Internally Displaced People :", tweet)
tweet = re.sub(r"ArtistsUnited", "Artists United", tweet)
tweet = re.sub(r"ClaytonBryant", "Clayton Bryant", tweet)
tweet = re.sub(r"jimmyfallon", "jimmy fallon", tweet)
tweet = re.sub(r"justinbieber", "justin bieber", tweet)
tweet = re.sub(r"UTC2015", "UTC 2015", tweet)
tweet = re.sub(r"Time2015", "Time 2015", tweet)
tweet = re.sub(r"djicemoon", "dj icemoon", tweet)
tweet = re.sub(r"LivingSafely", "Living Safely", tweet)
tweet = re.sub(r"FIFA16", "Fifa 2016", tweet)
tweet = re.sub(r"thisiswhywecanthavenicethings", "this is why we cannot have nice things", tweet)
tweet = re.sub(r"bbcnews", "bbc news", tweet)
tweet = re.sub(r"UndergroundRailraod", "Underground Railraod", tweet)
tweet = re.sub(r"c4news", "c4 news", tweet)
tweet = re.sub(r"OBLITERATION", "obliteration", tweet)
tweet = re.sub(r"MUDSLIDE", "mudslide", tweet)
tweet = re.sub(r"NoSurrender", "No Surrender", tweet)
tweet = re.sub(r"NotExplained", "Not Explained", tweet)
tweet = re.sub(r"greatbritishbakeoff", "great british bake off", tweet)
tweet = re.sub(r"LondonFire", "London Fire", tweet)
tweet = re.sub(r"KOTAWeather", "KOTA Weather", tweet)
tweet = re.sub(r"LuchaUnderground", "Lucha Underground", tweet)
tweet = re.sub(r"KOIN6News", "KOIN 6 News", tweet)
tweet = re.sub(r"LiveOnK2", "Live On K2", tweet)
tweet = re.sub(r"9NewsGoldCoast", "9 News Gold Coast", tweet)
tweet = re.sub(r"nikeplus", "nike plus", tweet)
tweet = re.sub(r"david_cameron", "David Cameron", tweet)
tweet = re.sub(r"peterjukes", "Peter Jukes", tweet)
tweet = re.sub(r"JamesMelville", "James Melville", tweet)
tweet = re.sub(r"megynkelly", "Megyn Kelly", tweet)
tweet = re.sub(r"cnewslive", "C News Live", tweet)
tweet = re.sub(r"JamaicaObserver", "Jamaica Observer", tweet)
tweet = re.sub(r"TweetLikeItsSeptember11th2001", "Tweet like it is september 11th 2001", tweet)
tweet = re.sub(r"cbplawyers", "cbp lawyers", tweet)
tweet = re.sub(r"fewmoretweets", "few more tweets", tweet)
tweet = re.sub(r"BlackLivesMatter", "Black Lives Matter", tweet)
tweet = re.sub(r"cjoyner", "Chris Joyner", tweet)
tweet = re.sub(r"ENGvAUS", "England vs Australia", tweet)
tweet = re.sub(r"ScottWalker", "Scott Walker", tweet)
tweet = re.sub(r"MikeParrActor", "Michael Parr", tweet)
tweet = re.sub(r"4PlayThursdays", "Foreplay Thursdays", tweet)
tweet = re.sub(r"TGF2015", "Tontitown Grape Festival", tweet)
tweet = re.sub(r"realmandyrain", "Mandy Rain", tweet)
tweet = re.sub(r"GraysonDolan", "Grayson Dolan", tweet)
tweet = re.sub(r"ApolloBrown", "Apollo Brown", tweet)
tweet = re.sub(r"saddlebrooke", "Saddlebrooke", tweet)
tweet = re.sub(r"TontitownGrape", "Tontitown Grape", tweet)
tweet = re.sub(r"AbbsWinston", "Abbs Winston", tweet)
tweet = re.sub(r"ShaunKing", "Shaun King", tweet)
tweet = re.sub(r"MeekMill", "Meek Mill", tweet)
tweet = re.sub(r"TornadoGiveaway", "Tornado Giveaway", tweet)
tweet = re.sub(r"GRupdates", "GR updates", tweet)
tweet = re.sub(r"SouthDowns", "South Downs", tweet)
tweet = re.sub(r"braininjury", "brain injury", tweet)
tweet = re.sub(r"auspol", "Australian politics", tweet)
tweet = re.sub(r"PlannedParenthood", "Planned Parenthood", tweet)
tweet = re.sub(r"calgaryweather", "Calgary Weather", tweet)
tweet = re.sub(r"weallheartonedirection", "we all heart one direction", tweet)
tweet = re.sub(r"edsheeran", "Ed Sheeran", tweet)
tweet = re.sub(r"TrueHeroes", "True Heroes", tweet)
tweet = re.sub(r"S3XLEAK", "sex leak", tweet)
tweet = re.sub(r"ComplexMag", "Complex Magazine", tweet)
tweet = re.sub(r"TheAdvocateMag", "The Advocate Magazine", tweet)
tweet = re.sub(r"CityofCalgary", "City of Calgary", tweet)
tweet = re.sub(r"EbolaOutbreak", "Ebola Outbreak", tweet)
tweet = re.sub(r"SummerFate", "Summer Fate", tweet)
tweet = re.sub(r"RAmag", "Royal Academy Magazine", tweet)
tweet = re.sub(r"offers2go", "offers to go", tweet)
tweet = re.sub(r"foodscare", "food scare", tweet)
tweet = re.sub(r"MNPDNashville", "Metropolitan Nashville Police Department", tweet)
tweet = re.sub(r"TfLBusAlerts", "TfL Bus Alerts", tweet)
tweet = re.sub(r"GamerGate", "Gamer Gate", tweet)
tweet = re.sub(r"IHHen", "Humanitarian Relief", tweet)
tweet = re.sub(r"spinningbot", "spinning bot", tweet)
tweet = re.sub(r"ModiMinistry", "Modi Ministry", tweet)
tweet = re.sub(r"TAXIWAYS", "taxi ways", tweet)
tweet = re.sub(r"Calum5SOS", "Calum Hood", tweet)
tweet = re.sub(r"po_st", "po.st", tweet)
tweet = re.sub(r"scoopit", "scoop.it", tweet)
tweet = re.sub(r"UltimaLucha", "Ultima Lucha", tweet)
tweet = re.sub(r"JonathanFerrell", "Jonathan Ferrell", tweet)
tweet = re.sub(r"aria_ahrary", "Aria Ahrary", tweet)
tweet = re.sub(r"rapidcity", "Rapid City", tweet)
tweet = re.sub(r"OutBid", "outbid", tweet)
tweet = re.sub(r"lavenderpoetrycafe", "lavender poetry cafe", tweet)
tweet = re.sub(r"EudryLantiqua", "Eudry Lantiqua", tweet)
tweet = re.sub(r"15PM", "15 PM", tweet)
tweet = re.sub(r"OriginalFunko", "Funko", tweet)
tweet = re.sub(r"rightwaystan", "Richard Tan", tweet)
tweet = re.sub(r"CindyNoonan", "Cindy Noonan", tweet)
tweet = re.sub(r"RT_America", "RT America", tweet)
tweet = re.sub(r"narendramodi", "Narendra Modi", tweet)
tweet = re.sub(r"BakeOffFriends", "Bake Off Friends", tweet)
tweet = re.sub(r"TeamHendrick", "Hendrick Motorsports", tweet)
tweet = re.sub(r"alexbelloli", "Alex Belloli", tweet)
tweet = re.sub(r"itsjustinstuart", "Justin Stuart", tweet)
tweet = re.sub(r"gunsense", "gun sense", tweet)
tweet = re.sub(r"DebateQuestionsWeWantToHear", "debate questions we want to hear", tweet)
tweet = re.sub(r"RoyalCarribean", "Royal Carribean", tweet)
tweet = re.sub(r"samanthaturne19", "Samantha Turner", tweet)
tweet = re.sub(r"JonVoyage", "Jon Stewart", tweet)
tweet = re.sub(r"renew911health", "renew 911 health", tweet)
tweet = re.sub(r"SuryaRay", "Surya Ray", tweet)
tweet = re.sub(r"pattonoswalt", "Patton Oswalt", tweet)
tweet = re.sub(r"minhazmerchant", "Minhaz Merchant", tweet)
tweet = re.sub(r"TLVFaces", "Israel Diaspora Coalition", tweet)
tweet = re.sub(r"pmarca", "Marc Andreessen", tweet)
tweet = re.sub(r"pdx911", "Portland Police", tweet)
tweet = re.sub(r"jamaicaplain", "Jamaica Plain", tweet)
tweet = re.sub(r"Japton", "Arkansas", tweet)
tweet = re.sub(r"RouteComplex", "Route Complex", tweet)
tweet = re.sub(r"INSubcontinent", "Indian Subcontinent", tweet)
tweet = re.sub(r"NJTurnpike", "New Jersey Turnpike", tweet)
tweet = re.sub(r"Politifiact", "PolitiFact", tweet)
tweet = re.sub(r"Hiroshima70", "Hiroshima", tweet)
tweet = re.sub(r"GMMBC", "Greater Mt Moriah Baptist Church", tweet)
tweet = re.sub(r"versethe", "verse the", tweet)
tweet = re.sub(r"TubeStrike", "Tube Strike", tweet)
tweet = re.sub(r"MissionHills", "Mission Hills", tweet)
tweet = re.sub(r"ProtectDenaliWolves", "Protect Denali Wolves", tweet)
tweet = re.sub(r"NANKANA", "Nankana", tweet)
tweet = re.sub(r"SAHIB", "Sahib", tweet)
tweet = re.sub(r"PAKPATTAN", "Pakpattan", tweet)
tweet = re.sub(r"Newz_Sacramento", "News Sacramento", tweet)
tweet = re.sub(r"gofundme", "go fund me", tweet)
tweet = re.sub(r"pmharper", "Stephen Harper", tweet)
tweet = re.sub(r"IvanBerroa", "Ivan Berroa", tweet)
tweet = re.sub(r"LosDelSonido", "Los Del Sonido", tweet)
tweet = re.sub(r"bancodeseries", "banco de series", tweet)
tweet = re.sub(r"timkaine", "Tim Kaine", tweet)
tweet = re.sub(r"IdentityTheft", "Identity Theft", tweet)
tweet = re.sub(r"AllLivesMatter", "All Lives Matter", tweet)
tweet = re.sub(r"mishacollins", "Misha Collins", tweet)
tweet = re.sub(r"BillNeelyNBC", "Bill Neely", tweet)
tweet = re.sub(r"BeClearOnCancer", "be clear on cancer", tweet)
tweet = re.sub(r"Kowing", "Knowing", tweet)
tweet = re.sub(r"ScreamQueens", "Scream Queens", tweet)
tweet = re.sub(r"AskCharley", "Ask Charley", tweet)
tweet = re.sub(r"BlizzHeroes", "Heroes of the Storm", tweet)
tweet = re.sub(r"BradleyBrad47", "Bradley Brad", tweet)
tweet = re.sub(r"HannaPH", "Typhoon Hanna", tweet)
tweet = re.sub(r"meinlcymbals", "MEINL Cymbals", tweet)
tweet = re.sub(r"Ptbo", "Peterborough", tweet)
tweet = re.sub(r"cnnbrk", "CNN Breaking News", tweet)
tweet = re.sub(r"IndianNews", "Indian News", tweet)
tweet = re.sub(r"savebees", "save bees", tweet)
tweet = re.sub(r"GreenHarvard", "Green Harvard", tweet)
tweet = re.sub(r"StandwithPP", "Stand with planned parenthood", tweet)
tweet = re.sub(r"hermancranston", "Herman Cranston", tweet)
tweet = re.sub(r"WMUR9", "WMUR-TV", tweet)
tweet = re.sub(r"RockBottomRadFM", "Rock Bottom Radio", tweet)
tweet = re.sub(r"ameenshaikh3", "Ameen Shaikh", tweet)
tweet = re.sub(r"ProSyn", "Project Syndicate", tweet)
tweet = re.sub(r"Daesh", "ISIS", tweet)
tweet = re.sub(r"s2g", "swear to god", tweet)
tweet = re.sub(r"listenlive", "listen live", tweet)
tweet = re.sub(r"CDCgov", "Centers for Disease Control and Prevention", tweet)
tweet = re.sub(r"FoxNew", "Fox News", tweet)
tweet = re.sub(r"CBSBigBrother", "Big Brother", tweet)
tweet = re.sub(r"JulieDiCaro", "Julie DiCaro", tweet)
tweet = re.sub(r"theadvocatemag", "The Advocate Magazine", tweet)
tweet = re.sub(r"RohnertParkDPS", "Rohnert Park Police Department", tweet)
tweet = re.sub(r"THISIZBWRIGHT", "Bonnie Wright", tweet)
tweet = re.sub(r"Popularmmos", "Popular MMOs", tweet)
tweet = re.sub(r"WildHorses", "Wild Horses", tweet)
tweet = re.sub(r"FantasticFour", "Fantastic Four", tweet)
tweet = re.sub(r"HORNDALE", "Horndale", tweet)
tweet = re.sub(r"PINER", "Piner", tweet)
tweet = re.sub(r"BathAndNorthEastSomerset", "Bath and North East Somerset", tweet)
tweet = re.sub(r"thatswhatfriendsarefor", "that is what friends are for", tweet)
tweet = re.sub(r"residualincome", "residual income", tweet)
tweet = re.sub(r"YahooNewsDigest", "Yahoo News Digest", tweet)
tweet = re.sub(r"MalaysiaAirlines", "Malaysia Airlines", tweet)
tweet = re.sub(r"AmazonDeals", "Amazon Deals", tweet)
tweet = re.sub(r"MissCharleyWebb", "Charley Webb", tweet)
tweet = re.sub(r"shoalstraffic", "shoals traffic", tweet)
tweet = re.sub(r"GeorgeFoster72", "George Foster", tweet)
tweet = re.sub(r"pop2015", "pop 2015", tweet)
tweet = re.sub(r"_PokemonCards_", "Pokemon Cards", tweet)
tweet = re.sub(r"DianneG", "Dianne Gallagher", tweet)
tweet = re.sub(r"KashmirConflict", "Kashmir Conflict", tweet)
tweet = re.sub(r"BritishBakeOff", "British Bake Off", tweet)
tweet = re.sub(r"FreeKashmir", "Free Kashmir", tweet)
tweet = re.sub(r"mattmosley", "Matt Mosley", tweet)
tweet = re.sub(r"BishopFred", "Bishop Fred", tweet)
tweet = re.sub(r"EndConflict", "End Conflict", tweet)
tweet = re.sub(r"EndOccupation", "End Occupation", tweet)
tweet = re.sub(r"UNHEALED", "unhealed", tweet)
tweet = re.sub(r"CharlesDagnall", "Charles Dagnall", tweet)
tweet = re.sub(r"Latestnews", "Latest news", tweet)
tweet = re.sub(r"KindleCountdown", "Kindle Countdown", tweet)
tweet = re.sub(r"NoMoreHandouts", "No More Handouts", tweet)
tweet = re.sub(r"datingtips", "dating tips", tweet)
tweet = re.sub(r"charlesadler", "Charles Adler", tweet)
tweet = re.sub(r"twia", "Texas Windstorm Insurance Association", tweet)
tweet = re.sub(r"txlege", "Texas Legislature", tweet)
tweet = re.sub(r"WindstormInsurer", "Windstorm Insurer", tweet)
tweet = re.sub(r"Newss", "News", tweet)
tweet = re.sub(r"hempoil", "hemp oil", tweet)
tweet = re.sub(r"CommoditiesAre", "Commodities are", tweet)
tweet = re.sub(r"tubestrike", "tube strike", tweet)
tweet = re.sub(r"JoeNBC", "Joe Scarborough", tweet)
tweet = re.sub(r"LiteraryCakes", "Literary Cakes", tweet)
tweet = re.sub(r"TI5", "The International 5", tweet)
tweet = re.sub(r"thehill", "the hill", tweet)
tweet = re.sub(r"3others", "3 others", tweet)
tweet = re.sub(r"stighefootball", "Sam Tighe", tweet)
tweet = re.sub(r"whatstheimportantvideo", "what is the important video", tweet)
tweet = re.sub(r"ClaudioMeloni", "Claudio Meloni", tweet)
tweet = re.sub(r"DukeSkywalker", "Duke Skywalker", tweet)
tweet = re.sub(r"carsonmwr", "Fort Carson", tweet)
tweet = re.sub(r"offdishduty", "off dish duty", tweet)
tweet = re.sub(r"andword", "and word", tweet)
tweet = re.sub(r"rhodeisland", "Rhode Island", tweet)
tweet = re.sub(r"easternoregon", "Eastern Oregon", tweet)
tweet = re.sub(r"WAwildfire", "Washington Wildfire", tweet)
tweet = re.sub(r"fingerrockfire", "Finger Rock Fire", tweet)
tweet = re.sub(r"57am", "57 am", tweet)
tweet = re.sub(r"fingerrockfire", "Finger Rock Fire", tweet)
tweet = re.sub(r"JacobHoggard", "Jacob Hoggard", tweet)
tweet = re.sub(r"newnewnew", "new new new", tweet)
tweet = re.sub(r"under50", "under 50", tweet)
tweet = re.sub(r"getitbeforeitsgone", "get it before it is gone", tweet)
tweet = re.sub(r"freshoutofthebox", "fresh out of the box", tweet)
tweet = re.sub(r"amwriting", "am writing", tweet)
tweet = re.sub(r"Bokoharm", "Boko Haram", tweet)
tweet = re.sub(r"Nowlike", "Now like", tweet)
tweet = re.sub(r"seasonfrom", "season from", tweet)
tweet = re.sub(r"epicente", "epicenter", tweet)
tweet = re.sub(r"epicenterr", "epicenter", tweet)
tweet = re.sub(r"sicklife", "sick life", tweet)
tweet = re.sub(r"yycweather", "Calgary Weather", tweet)
tweet = re.sub(r"calgarysun", "Calgary Sun", tweet)
tweet = re.sub(r"approachng", "approaching", tweet)
tweet = re.sub(r"evng", "evening", tweet)
tweet = re.sub(r"Sumthng", "something", tweet)
tweet = re.sub(r"EllenPompeo", "Ellen Pompeo", tweet)
tweet = re.sub(r"shondarhimes", "Shonda Rhimes", tweet)
tweet = re.sub(r"ABCNetwork", "ABC Network", tweet)
tweet = re.sub(r"SushmaSwaraj", "Sushma Swaraj", tweet)
tweet = re.sub(r"pray4japan", "Pray for Japan", tweet)
tweet = re.sub(r"hope4japan", "Hope for Japan", tweet)
tweet = re.sub(r"Illusionimagess", "Illusion images", tweet)
tweet = re.sub(r"SummerUnderTheStars", "Summer Under The Stars", tweet)
tweet = re.sub(r"ShallWeDance", "Shall We Dance", tweet)
tweet = re.sub(r"TCMParty", "TCM Party", tweet)
tweet = re.sub(r"marijuananews", "marijuana news", tweet)
tweet = re.sub(r"onbeingwithKristaTippett", "on being with Krista Tippett", tweet)
tweet = re.sub(r"Beingtweets", "Being tweets", tweet)
tweet = re.sub(r"newauthors", "new authors", tweet)
tweet = re.sub(r"remedyyyy", "remedy", tweet)
tweet = re.sub(r"44PM", "44 PM", tweet)
tweet = re.sub(r"HeadlinesApp", "Headlines App", tweet)
tweet = re.sub(r"40PM", "40 PM", tweet)
tweet = re.sub(r"myswc", "Severe Weather Center", tweet)
tweet = re.sub(r"ithats", "that is", tweet)
tweet = re.sub(r"icouldsitinthismomentforever", "I could sit in this moment forever", tweet)
tweet = re.sub(r"FatLoss", "Fat Loss", tweet)
tweet = re.sub(r"02PM", "02 PM", tweet)
tweet = re.sub(r"MetroFmTalk", "Metro Fm Talk", tweet)
tweet = re.sub(r"Bstrd", "bastard", tweet)
tweet = re.sub(r"bldy", "bloody", tweet)
tweet = re.sub(r"MetrofmTalk", "Metro Fm Talk", tweet)
tweet = re.sub(r"terrorismturn", "terrorism turn", tweet)
tweet = re.sub(r"BBCNewsAsia", "BBC News Asia", tweet)
tweet = re.sub(r"BehindTheScenes", "Behind The Scenes", tweet)
tweet = re.sub(r"GeorgeTakei", "George Takei", tweet)
tweet = re.sub(r"WomensWeeklyMag", "Womens Weekly Magazine", tweet)
tweet = re.sub(r"SurvivorsGuidetoEarth", "Survivors Guide to Earth", tweet)
tweet = re.sub(r"incubusband", "incubus band", tweet)
tweet = re.sub(r"Babypicturethis", "Baby picture this", tweet)
tweet = re.sub(r"BombEffects", "Bomb Effects", tweet)
tweet = re.sub(r"win10", "Windows 10", tweet)
tweet = re.sub(r"idkidk", "I do not know I do not know", tweet)
tweet = re.sub(r"TheWalkingDead", "The Walking Dead", tweet)
tweet = re.sub(r"amyschumer", "Amy Schumer", tweet)
tweet = re.sub(r"crewlist", "crew list", tweet)
tweet = re.sub(r"Erdogans", "Erdogan", tweet)
tweet = re.sub(r"BBCLive", "BBC Live", tweet)
tweet = re.sub(r"TonyAbbottMHR", "Tony Abbott", tweet)
tweet = re.sub(r"paulmyerscough", "Paul Myerscough", tweet)
tweet = re.sub(r"georgegallagher", "George Gallagher", tweet)
tweet = re.sub(r"JimmieJohnson", "Jimmie Johnson", tweet)
tweet = re.sub(r"pctool", "pc tool", tweet)
tweet = re.sub(r"DoingHashtagsRight", "Doing Hashtags Right", tweet)
tweet = re.sub(r"ThrowbackThursday", "Throwback Thursday", tweet)
tweet = re.sub(r"SnowBackSunday", "Snowback Sunday", tweet)
tweet = re.sub(r"LakeEffect", "Lake Effect", tweet)
tweet = re.sub(r"RTphotographyUK", "Richard Thomas Photography UK", tweet)
tweet = re.sub(r"BigBang_CBS", "Big Bang CBS", tweet)
tweet = re.sub(r"writerslife", "writers life", tweet)
tweet = re.sub(r"NaturalBirth", "Natural Birth", tweet)
tweet = re.sub(r"UnusualWords", "Unusual Words", tweet)
tweet = re.sub(r"wizkhalifa", "Wiz Khalifa", tweet)
tweet = re.sub(r"acreativedc", "a creative DC", tweet)
tweet = re.sub(r"vscodc", "vsco DC", tweet)
tweet = re.sub(r"VSCOcam", "vsco camera", tweet)
tweet = re.sub(r"TheBEACHDC", "The beach DC", tweet)
tweet = re.sub(r"buildingmuseum", "building museum", tweet)
tweet = re.sub(r"WorldOil", "World Oil", tweet)
tweet = re.sub(r"redwedding", "red wedding", tweet)
tweet = re.sub(r"AmazingRaceCanada", "Amazing Race Canada", tweet)
tweet = re.sub(r"WakeUpAmerica", "Wake Up America", tweet)
tweet = re.sub(r"\\Allahuakbar\", "Allahu Akbar", tweet)
tweet = re.sub(r"bleased", "blessed", tweet)
tweet = re.sub(r"nigeriantribune", "Nigerian Tribune", tweet)
tweet = re.sub(r"HIDEO_KOJIMA_EN", "Hideo Kojima", tweet)
tweet = re.sub(r"FusionFestival", "Fusion Festival", tweet)
tweet = re.sub(r"50Mixed", "50 Mixed", tweet)
tweet = re.sub(r"NoAgenda", "No Agenda", tweet)
tweet = re.sub(r"WhiteGenocide", "White Genocide", tweet)
tweet = re.sub(r"dirtylying", "dirty lying", tweet)
tweet = re.sub(r"SyrianRefugees", "Syrian Refugees", tweet)
tweet = re.sub(r"changetheworld", "change the world", tweet)
tweet = re.sub(r"Ebolacase", "Ebola case", tweet)
tweet = re.sub(r"mcgtech", "mcg technologies", tweet)
tweet = re.sub(r"withweapons", "with weapons", tweet)
tweet = re.sub(r"advancedwarfare", "advanced warfare", tweet)
tweet = re.sub(r"letsFootball", "let us Football", tweet)
tweet = re.sub(r"LateNiteMix", "late night mix", tweet)
tweet = re.sub(r"PhilCollinsFeed", "Phil Collins", tweet)
tweet = re.sub(r"RudyHavenstein", "Rudy Havenstein", tweet)
tweet = re.sub(r"22PM", "22 PM", tweet)
tweet = re.sub(r"54am", "54 AM", tweet)
tweet = re.sub(r"38am", "38 AM", tweet)
tweet = re.sub(r"OldFolkExplainStuff", "Old Folk Explain Stuff", tweet)
tweet = re.sub(r"BlacklivesMatter", "Black Lives Matter", tweet)
tweet = re.sub(r"InsaneLimits", "Insane Limits", tweet)
tweet = re.sub(r"youcantsitwithus", "you cannot sit with us", tweet)
tweet = re.sub(r"2k15", "2015", tweet)
tweet = re.sub(r"TheIran", "Iran", tweet)
tweet = re.sub(r"JimmyFallon", "Jimmy Fallon", tweet)
tweet = re.sub(r"AlbertBrooks", "Albert Brooks", tweet)
tweet = re.sub(r"defense_news", "defense news", tweet)
tweet = re.sub(r"nuclearrcSA", "Nuclear Risk Control Self Assessment", tweet)
tweet = re.sub(r"Auspol", "Australia Politics", tweet)
tweet = re.sub(r"NuclearPower", "Nuclear Power", tweet)
tweet = re.sub(r"WhiteTerrorism", "White Terrorism", tweet)
tweet = re.sub(r"truthfrequencyradio", "Truth Frequency Radio", tweet)
tweet = re.sub(r"ErasureIsNotEquality", "Erasure is not equality", tweet)
tweet = re.sub(r"ProBonoNews", "Pro Bono News", tweet)
tweet = re.sub(r"JakartaPost", "Jakarta Post", tweet)
tweet = re.sub(r"toopainful", "too painful", tweet)
tweet = re.sub(r"melindahaunton", "Melinda Haunton", tweet)
tweet = re.sub(r"NoNukes", "No Nukes", tweet)
tweet = re.sub(r"curryspcworld", "Currys PC World", tweet)
tweet = re.sub(r"ineedcake", "I need cake", tweet)
tweet = re.sub(r"blackforestgateau", "black forest gateau", tweet)
tweet = re.sub(r"BBCOne", "BBC One", tweet)
tweet = re.sub(r"AlexxPage", "Alex Page", tweet)
tweet = re.sub(r"jonathanserrie", "Jonathan Serrie", tweet)
tweet = re.sub(r"SocialJerkBlog", "Social Jerk Blog", tweet)
tweet = re.sub(r"ChelseaVPeretti", "Chelsea Peretti", tweet)
tweet = re.sub(r"irongiant", "iron giant", tweet)
tweet = re.sub(r"RonFunches", "Ron Funches", tweet)
tweet = re.sub(r"TimCook", "Tim Cook", tweet)
tweet = re.sub(r"sebastianstanisaliveandwell", "Sebastian Stan is alive and well", tweet)
tweet = re.sub(r"Madsummer", "Mad summer", tweet)
tweet = re.sub(r"NowYouKnow", "Now you know", tweet)
tweet = re.sub(r"concertphotography", "concert photography", tweet)
tweet = re.sub(r"TomLandry", "Tom Landry", tweet)
tweet = re.sub(r"showgirldayoff", "show girl day off", tweet)
tweet = re.sub(r"Yougslavia", "Yugoslavia", tweet)
tweet = re.sub(r"QuantumDataInformatics", "Quantum Data Informatics", tweet)
tweet = re.sub(r"FromTheDesk", "From The Desk", tweet)
tweet = re.sub(r"TheaterTrial", "Theater Trial", tweet)
tweet = re.sub(r"CatoInstitute", "Cato Institute", tweet)
tweet = re.sub(r"EmekaGift", "Emeka Gift", tweet)
tweet = re.sub(r"LetsBe_Rational", "Let us be rational", tweet)
tweet = re.sub(r"Cynicalreality", "Cynical reality", tweet)
tweet = re.sub(r"FredOlsenCruise", "Fred Olsen Cruise", tweet)
tweet = re.sub(r"NotSorry", "not sorry", tweet)
tweet = re.sub(r"UseYourWords", "use your words", tweet)
tweet = re.sub(r"WordoftheDay", "word of the day", tweet)
tweet = re.sub(r"Dictionarycom", "Dictionary.com", tweet)
tweet = re.sub(r"TheBrooklynLife", "The Brooklyn Life", tweet)
tweet = re.sub(r"jokethey", "joke they", tweet)
tweet = re.sub(r"nflweek1picks", "NFL week 1 picks", tweet)
tweet = re.sub(r"uiseful", "useful", tweet)
tweet = re.sub(r"JusticeDotOrg", "The American Association for Justice", tweet)
tweet = re.sub(r"autoaccidents", "auto accidents", tweet)
tweet = re.sub(r"SteveGursten", "Steve Gursten", tweet)
tweet = re.sub(r"MichiganAutoLaw", "Michigan Auto Law", tweet)
tweet = re.sub(r"birdgang", "bird gang", tweet)
tweet = re.sub(r"nflnetwork", "NFL Network", tweet)
tweet = re.sub(r"NYDNSports", "NY Daily News Sports", tweet)
tweet = re.sub(r"RVacchianoNYDN", "Ralph Vacchiano NY Daily News", tweet)
tweet = re.sub(r"EdmontonEsks", "Edmonton Eskimos", tweet)
tweet = re.sub(r"david_brelsford", "David Brelsford", tweet)
tweet = re.sub(r"TOI_India", "The Times of India", tweet)
tweet = re.sub(r"hegot", "he got", tweet)
tweet = re.sub(r"SkinsOn9", "Skins on 9", tweet)
tweet = re.sub(r"sothathappened", "so that happened", tweet)
tweet = re.sub(r"LCOutOfDoors", "LC Out Of Doors", tweet)
tweet = re.sub(r"NationFirst", "Nation First", tweet)
tweet = re.sub(r"IndiaToday", "India Today", tweet)
tweet = re.sub(r"HLPS", "helps", tweet)
tweet = re.sub(r"HOSTAGESTHROSW", "hostages throw", tweet)
tweet = re.sub(r"SNCTIONS", "sanctions", tweet)
tweet = re.sub(r"BidTime", "Bid Time", tweet)
tweet = re.sub(r"crunchysensible", "crunchy sensible", tweet)
tweet = re.sub(r"RandomActsOfRomance", "Random acts of romance", tweet)
tweet = re.sub(r"MomentsAtHill", "Moments at hill", tweet)
tweet = re.sub(r"eatshit", "eat shit", tweet)
tweet = re.sub(r"liveleakfun", "live leak fun", tweet)
tweet = re.sub(r"SahelNews", "Sahel News", tweet)
tweet = re.sub(r"abc7newsbayarea", "ABC 7 News Bay Area", tweet)
tweet = re.sub(r"facilitiesmanagement", "facilities management", tweet)
tweet = re.sub(r"facilitydude", "facility dude", tweet)
tweet = re.sub(r"CampLogistics", "Camp logistics", tweet)
tweet = re.sub(r"alaskapublic", "Alaska public", tweet)
tweet = re.sub(r"MarketResearch", "Market Research", tweet)
tweet = re.sub(r"AccuracyEsports", "Accuracy Esports", tweet)
tweet = re.sub(r"TheBodyShopAust", "The Body Shop Australia", tweet)
tweet = re.sub(r"yychail", "Calgary hail", tweet)
tweet = re.sub(r"yyctraffic", "Calgary traffic", tweet)
tweet = re.sub(r"eliotschool", "eliot school", tweet)
tweet = re.sub(r"TheBrokenCity", "The Broken City", tweet)
tweet = re.sub(r"OldsFireDept", "Olds Fire Department", tweet)
tweet = re.sub(r"RiverComplex", "River Complex", tweet)
tweet = re.sub(r"fieldworksmells", "field work smells", tweet)
tweet = re.sub(r"IranElection", "Iran Election", tweet)
tweet = re.sub(r"glowng", "glowing", tweet)
tweet = re.sub(r"kindlng", "kindling", tweet)
tweet = re.sub(r"riggd", "rigged", tweet)
tweet = re.sub(r"slownewsday", "slow news day", tweet)
tweet = re.sub(r"MyanmarFlood", "Myanmar Flood", tweet)
tweet = re.sub(r"abc7chicago", "ABC 7 Chicago", tweet)
tweet = re.sub(r"copolitics", "Colorado Politics", tweet)
tweet = re.sub(r"AdilGhumro", "Adil Ghumro", tweet)
tweet = re.sub(r"netbots", "net bots", tweet)
tweet = re.sub(r"byebyeroad", "bye bye road", tweet)
tweet = re.sub(r"massiveflooding", "massive flooding", tweet)
tweet = re.sub(r"EndofUS", "End of United States", tweet)
tweet = re.sub(r"35PM", "35 PM", tweet)
tweet = re.sub(r"greektheatrela", "Greek Theatre Los Angeles", tweet)
tweet = re.sub(r"76mins", "76 minutes", tweet)
tweet = re.sub(r"publicsafetyfirst", "public safety first", tweet)
tweet = re.sub(r"livesmatter", "lives matter", tweet)
tweet = re.sub(r"myhometown", "my hometown", tweet)
tweet = re.sub(r"tankerfire", "tanker fire", tweet)
tweet = re.sub(r"MEMORIALDAY", "memorial day", tweet)
tweet = re.sub(r"MEMORIAL_DAY", "memorial day", tweet)
tweet = re.sub(r"instaxbooty", "instagram booty", tweet)
tweet = re.sub(r"Jerusalem_Post", "Jerusalem Post", tweet)
tweet = re.sub(r"WayneRooney_INA", "Wayne Rooney", tweet)
tweet = re.sub(r"VirtualReality", "Virtual Reality", tweet)
tweet = re.sub(r"OculusRift", "Oculus Rift", tweet)
tweet = re.sub(r"OwenJones84", "Owen Jones", tweet)
tweet = re.sub(r"jeremycorbyn", "Jeremy Corbyn", tweet)
tweet = re.sub(r"paulrogers002", "Paul Rogers", tweet)
tweet = re.sub(r"mortalkombatx", "Mortal Kombat X", tweet)
tweet = re.sub(r"mortalkombat", "Mortal Kombat", tweet)
tweet = re.sub(r"FilipeCoelho92", "Filipe Coelho", tweet)
tweet = re.sub(r"OnlyQuakeNews", "Only Quake News", tweet)
tweet = re.sub(r"kostumes", "costumes", tweet)
tweet = re.sub(r"YEEESSSS", "yes", tweet)
tweet = re.sub(r"ToshikazuKatayama", "Toshikazu Katayama", tweet)
tweet = re.sub(r"IntlDevelopment", "Intl Development", tweet)
tweet = re.sub(r"ExtremeWeather", "Extreme Weather", tweet)
tweet = re.sub(r"WereNotGruberVoters", "We are not gruber voters", tweet)
tweet = re.sub(r"NewsThousands", "News Thousands", tweet)
tweet = re.sub(r"EdmundAdamus", "Edmund Adamus", tweet)
tweet = re.sub(r"EyewitnessWV", "Eye witness WV", tweet)
tweet = re.sub(r"PhiladelphiaMuseu", "Philadelphia Museum", tweet)
tweet = re.sub(r"DublinComicCon", "Dublin Comic Con", tweet)
tweet = re.sub(r"NicholasBrendon", "Nicholas Brendon", tweet)
tweet = re.sub(r"Alltheway80s", "All the way 80s", tweet)
tweet = re.sub(r"FromTheField", "From the field", tweet)
tweet = re.sub(r"NorthIowa", "North Iowa", tweet)
tweet = re.sub(r"WillowFire", "Willow Fire", tweet)
tweet = re.sub(r"MadRiverComplex", "Mad River Complex", tweet)
tweet = re.sub(r"feelingmanly", "feeling manly", tweet)
tweet = re.sub(r"stillnotoverit", "still not over it", tweet)
tweet = re.sub(r"FortitudeValley", "Fortitude Valley", tweet)
tweet = re.sub(r"CoastpowerlineTramTr", "Coast powerline", tweet)
tweet = re.sub(r"ServicesGold", "Services Gold", tweet)
tweet = re.sub(r"NewsbrokenEmergency", "News broken emergency", tweet)
tweet = re.sub(r"Evaucation", "evacuation", tweet)
tweet = re.sub(r"leaveevacuateexitbe", "leave evacuate exit be", tweet)
tweet = re.sub(r"P_EOPLE", "PEOPLE", tweet)
tweet = re.sub(r"Tubestrike", "tube strike", tweet)
tweet = re.sub(r"CLASS_SICK", "CLASS SICK", tweet)
tweet = re.sub(r"localplumber", "local plumber", tweet)
tweet = re.sub(r"awesomejobsiri", "awesome job siri", tweet)
tweet = re.sub(r"PayForItHow", "Pay for it how", tweet)
tweet = re.sub(r"ThisIsAfrica", "This is Africa", tweet)
tweet = re.sub(r"crimeairnetwork", "crime air network", tweet)
tweet = re.sub(r"KimAcheson", "Kim Acheson", tweet)
tweet = re.sub(r"cityofcalgary", "City of Calgary", tweet)
tweet = re.sub(r"prosyndicate", "pro syndicate", tweet)
tweet = re.sub(r"660NEWS", "660 NEWS", tweet)
tweet = re.sub(r"BusInsMagazine", "Business Insurance Magazine", tweet)
tweet = re.sub(r"wfocus", "focus", tweet)
tweet = re.sub(r"ShastaDam", "Shasta Dam", tweet)
tweet = re.sub(r"go2MarkFranco", "Mark Franco", tweet)
tweet = re.sub(r"StephGHinojosa", "Steph Hinojosa", tweet)
tweet = re.sub(r"Nashgrier", "Nash Grier", tweet)
tweet = re.sub(r"NashNewVideo", "Nash new video", tweet)
tweet = re.sub(r"IWouldntGetElectedBecause", "I would not get elected because", tweet)
tweet = re.sub(r"SHGames", "Sledgehammer Games", tweet)
tweet = re.sub(r"bedhair", "bed hair", tweet)
tweet = re.sub(r"JoelHeyman", "Joel Heyman", tweet)
tweet = re.sub(r"viaYouTube", "via YouTube", tweet)
tweet = re.sub(r"https?:\/\/t.co\/[A-Za-z0-9]+", "", tweet)
    # special characters to pad with surrounding spaces
    punctuations = '@#!?+&*[]-%.:/();$=><|{}^' + "'`"
    for p in punctuations:
        tweet = tweet.replace(p, f' {p} ')
    tweet = tweet.replace('...', ' ... ')
    if '...' not in tweet:
        tweet = tweet.replace('..', ' ... ')
tweet = re.sub(r"MH370", "Malaysia Airlines Flight 370", tweet)
tweet = re.sub(r"m̼sica", "music", tweet)
tweet = re.sub(r"okwx", "Oklahoma City Weather", tweet)
tweet = re.sub(r"arwx", "Arkansas Weather", tweet)
tweet = re.sub(r"gawx", "Georgia Weather", tweet)
tweet = re.sub(r"scwx", "South Carolina Weather", tweet)
tweet = re.sub(r"cawx", "California Weather", tweet)
tweet = re.sub(r"tnwx", "Tennessee Weather", tweet)
tweet = re.sub(r"azwx", "Arizona Weather", tweet)
tweet = re.sub(r"alwx", "Alabama Weather", tweet)
tweet = re.sub(r"wordpressdotcom", "wordpress", tweet)
tweet = re.sub(r"usNWSgov", "United States National Weather Service", tweet)
tweet = re.sub(r"Suruc", "Sanliurfa", tweet)
tweet = re.sub(r"Bestnaijamade", "bestnaijamade", tweet)
tweet = re.sub(r"SOUDELOR", "Soudelor", tweet)
return tweet<feature_engineering> | def f(x):
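    # Binary flag: 0 if the bureau credit is 'Closed', 1 for any other (still-active) status.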
if x == 'Closed':
y = 0
else:
y = 1
return y
bureau_fe1 = bureau
bureau_fe1['CREDIT_ACTIVE_CLOSED'] = bureau_fe1.apply(lambda x: f(x.CREDIT_ACTIVE), axis = 1)
bureau_fe1.head() | Home Credit Default Risk |
17,544,955 | train['text'] = train['text'].apply(lambda s: clean(s))<filter> | grp = bureau_fe1.groupby(by = ['SK_ID_CURR'])['CREDIT_ACTIVE_CLOSED'].mean().reset_index().rename(index=str, columns={'CREDIT_ACTIVE_CLOSED':'ACTIVE_LOANS_PERCENTAGE'})
app_train_bureau = app_train_bureau.merge(grp, on = ['SK_ID_CURR'], how = 'left')
del bureau_fe1['CREDIT_ACTIVE_CLOSED']
print(bureau_fe1.shape)
bureau_fe1.head() | Home Credit Default Risk |
17,544,955 | train[train.target == 0]<create_dataframe> | app_train_bureau = app_train_bureau.fillna(0)
app_train_bureau.head() | Home Credit Default Risk |
17,544,955 | train_cleaned_df = train.copy()<load_pretrained> | app_train_bureau['BUREAU_ENDDATE_FACT_DIFF'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT']
app_train_bureau['BUREAU_CREDIT_FACT_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_ENDDATE_FACT']
app_train_bureau['BUREAU_CREDIT_ENDDATE_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']
app_train_bureau['BUREAU_CREDIT_DEBT_RATIO'] = bureau['AMT_CREDIT_SUM_DEBT'] / bureau['AMT_CREDIT_SUM']
app_train_bureau['BUREAU_CREDIT_DEBT_DIFF'] = bureau['AMT_CREDIT_SUM_DEBT'] - bureau['AMT_CREDIT_SUM']
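# Caveat: these bureau-level series join onto app_train_bureau by pandas index position,
# not by an SK_ID_CURR aggregation, so surplus rows become NaN (filled with 0 below);
# a groupby('SK_ID_CURR') aggregate would be the more principled merge.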
app_train_bureau = app_train_bureau.fillna(0)
app_train_bureau.head() | Home Credit Default Risk |
17,544,955 | tokenizer = AutoTokenizer.from_pretrained('bert-large-uncased')
bert = TFBertModel.from_pretrained('bert-large-uncased' )<string_transform> | app_train = app_train_bureau | Home Credit Default Risk |
17,544,955 | tokenizer('Shine on you crazy diamond.' )<string_transform> | ftr_app = app_train.drop(columns=['SK_ID_CURR','TARGET'])
target_app = app_train['TARGET']
train_x, valid_x, train_y, valid_y = train_test_split(ftr_app, target_app, test_size=0.3, random_state=2020)
train_x.shape, valid_x.shape | Home Credit Default Risk |
17,544,955 | print("max len of tweets",max([len(x.split())for x in train.text]))<string_transform> | def lgb_cv(num_leaves, learning_rate, n_estimators, subsample, colsample_bytree, reg_alpha, reg_lambda, x_data=None, y_data=None, n_splits=5, output='score'):
score = 0
kf = KFold(n_splits=n_splits)
models = []
for train_index, valid_index in kf.split(x_data):
x_train, y_train = x_data.iloc[train_index], y_data.iloc[train_index]
x_valid, y_valid = x_data.iloc[valid_index], y_data.iloc[valid_index]
model = LGBMClassifier(
num_leaves = int(num_leaves),
learning_rate = learning_rate,
n_estimators = int(n_estimators),
subsample = np.clip(subsample, 0, 1),
colsample_bytree = np.clip(colsample_bytree, 0, 1),
reg_alpha = reg_alpha,
reg_lambda = reg_lambda,
max_depth=16,
)
# fit and score on this fold's split (the source fit on the global train_x/valid_x, defeating the CV)
model.fit(x_train, y_train, eval_set=[(x_train, y_train),(x_valid, y_valid)], eval_metric= 'auc', verbose= False,
early_stopping_rounds= 100)
models.append(model)
pred = model.predict_proba(x_valid)[:, 1]
true = y_valid
score += roc_auc_score(true, pred)/n_splits
if output == 'score':
return score
if output == 'model':
return models | Home Credit Default Risk |
17,544,955 | x_train = tokenizer(
text=train.text.tolist() ,
add_special_tokens=True,
max_length=73,
truncation=True,
padding=True,
return_tensors='tf',
return_token_type_ids = False,
return_attention_mask = True,
verbose = True)
<count_values> | func_fixed = partial(lgb_cv, x_data=train_x, y_data=train_y, n_splits=5, output='score')
lgbBO = BayesianOptimization(
func_fixed,
{
'num_leaves':(16, 1024),
'learning_rate':(0.0001, 0.1),
'n_estimators':(16, 1024),
'subsample':(0, 1),
'colsample_bytree':(0, 1),
'reg_alpha':(0, 10),
'reg_lambda':(0, 50),
},
random_state=2020
)
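# maximize() first samples 5 random points (init_points), then runs 10 Bayesian-guided
# iterations (n_iter); the best parameter set found is exposed as lgbBO.max['params'].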
lgbBO.maximize(init_points=5, n_iter=10 ) | Home Credit Default Risk |
17,544,955 | train.target.value_counts()<choose_model_class> | clf = LGBMClassifier(
n_estimators=int(lgbBO.max['params']['n_estimators']),
learning_rate=lgbBO.max['params']['learning_rate'],
num_leaves=int(lgbBO.max['params']['num_leaves']),
subsample=lgbBO.max['params']['subsample'],
max_depth=16,
reg_alpha=lgbBO.max['params']['reg_alpha'],
reg_lambda=lgbBO.max['params']['reg_lambda'])
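# the int() casts are needed because BayesianOptimization returns every tuned parameter
# as a float; note colsample_bytree was tuned above but, as in the source, is not reused here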
clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)], eval_metric= 'auc', verbose= 100,
early_stopping_rounds= 200 ) | Home Credit Default Risk |
17,544,955 | <choose_model_class><EOS> | test_merge = app_test.merge(prev_amt_agg, on='SK_ID_CURR', how='left', indicator=False)
test_merge = test_merge.merge(prev_approved, on='SK_ID_CURR', how='left', indicator=False)
test_merge = test_merge.merge(prev_refused, on='SK_ID_CURR', how='left', indicator=False)
test_merge['PRE_CONTRACT_APPROVED_RATE'] = test_merge['PRE_CONTRACT_APPROVED'] /(test_merge['PRE_CONTRACT_APPROVED'] + test_merge['PRE_CONTRACT_REFUSED'])
test_merge['PRE_CONTRACT_REFUSED_RATE'] = test_merge['PRE_CONTRACT_REFUSED'] /(test_merge['PRE_CONTRACT_APPROVED'] + test_merge['PRE_CONTRACT_REFUSED'])
test_merge = test_merge.replace(float('NaN'),0)
test_merge = test_merge.merge(PAST_LOANS_PER_CUS, on = ['SK_ID_CURR'], how = 'left')
test_merge = test_merge.merge(BUREAU_LOAN_TYPES, on = ['SK_ID_CURR'], how = 'left' ).fillna(0)
test_merge['AVERAGE_LOAN_TYPE'] = test_merge['BUREAU_LOAN_COUNT']/test_merge['BUREAU_LOAN_TYPES']
test_merge = test_merge.fillna(0)
del test_merge['BUREAU_LOAN_COUNT'], test_merge['BUREAU_LOAN_TYPES']
test_merge = test_merge.merge(grp, on = ['SK_ID_CURR'], how = 'left' ).fillna(0)
test_merge['BUREAU_ENDDATE_FACT_DIFF'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT']
test_merge['BUREAU_CREDIT_FACT_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_ENDDATE_FACT']
test_merge['BUREAU_CREDIT_ENDDATE_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']
test_merge['BUREAU_CREDIT_DEBT_RATIO'] = bureau['AMT_CREDIT_SUM_DEBT'] / bureau['AMT_CREDIT_SUM']
test_merge['BUREAU_CREDIT_DEBT_DIFF'] = bureau['AMT_CREDIT_SUM_DEBT'] - bureau['AMT_CREDIT_SUM']
test_merge = test_merge.fillna(0)
preds = clf.predict_proba(test_merge.drop(columns=['SK_ID_CURR'])) [:, 1]
app_test['TARGET'] = preds
app_test[['SK_ID_CURR', 'TARGET']].to_csv('result_00.csv', index=False ) | Home Credit Default Risk |
15,886,745 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<train_model> | %matplotlib inline | Home Credit Default Risk |
15,886,745 | train_history = model.fit(
x ={'input_ids':x_train['input_ids'],'attention_mask':x_train['attention_mask']} ,
y = y_train, epochs=12, batch_size=32
)<string_transform> | import os, sys | Home Credit Default Risk |
15,886,745 | x_test = tokenizer(
text=test.text.tolist() ,
add_special_tokens=True,
max_length=73,
truncation=True,
padding=True,
return_tensors='tf',
return_token_type_ids = False,
return_attention_mask = True,
verbose = True)
<predict_on_test> | default_dir = ".. /input/home-credit-default-risk/" | Home Credit Default Risk |
15,886,745 | predicted = model.predict({'input_ids':x_test['input_ids'],'attention_mask':x_test['attention_mask']} )<prepare_x_and_y> | def get_balance_data() :
pos_dtype = {
'SK_ID_PREV':np.uint32, 'SK_ID_CURR':np.uint32, 'MONTHS_BALANCE':np.int32, 'SK_DPD':np.int32,
'SK_DPD_DEF':np.int32, 'CNT_INSTALMENT':np.float32,'CNT_INSTALMENT_FUTURE':np.float32
}
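# Explicit dtypes like these are a memory optimization: down-casting from pandas'
# default int64/float64 roughly halves the footprint of these multi-million-row tables.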
install_dtype = {
'SK_ID_PREV':np.uint32, 'SK_ID_CURR':np.uint32, 'NUM_INSTALMENT_NUMBER':np.int32, 'NUM_INSTALMENT_VERSION':np.float32,
'DAYS_INSTALMENT':np.float32, 'DAYS_ENTRY_PAYMENT':np.float32, 'AMT_INSTALMENT':np.float32, 'AMT_PAYMENT':np.float32
}
card_dtype = {
'SK_ID_PREV':np.uint32, 'SK_ID_CURR':np.uint32, 'MONTHS_BALANCE':np.int16,
'AMT_CREDIT_LIMIT_ACTUAL':np.int32, 'CNT_DRAWINGS_CURRENT':np.int32, 'SK_DPD':np.int32,'SK_DPD_DEF':np.int32,
'AMT_BALANCE':np.float32, 'AMT_DRAWINGS_ATM_CURRENT':np.float32, 'AMT_DRAWINGS_CURRENT':np.float32,
'AMT_DRAWINGS_OTHER_CURRENT':np.float32, 'AMT_DRAWINGS_POS_CURRENT':np.float32, 'AMT_INST_MIN_REGULARITY':np.float32,
'AMT_PAYMENT_CURRENT':np.float32, 'AMT_PAYMENT_TOTAL_CURRENT':np.float32, 'AMT_RECEIVABLE_PRINCIPAL':np.float32,
'AMT_RECIVABLE':np.float32, 'AMT_TOTAL_RECEIVABLE':np.float32, 'CNT_DRAWINGS_ATM_CURRENT':np.float32,
'CNT_DRAWINGS_OTHER_CURRENT':np.float32, 'CNT_DRAWINGS_POS_CURRENT':np.float32, 'CNT_INSTALMENT_MATURE_CUM':np.float32
}
pos_bal = pd.read_csv(os.path.join(default_dir,'POS_CASH_balance.csv'), dtype=pos_dtype)
install = pd.read_csv(os.path.join(default_dir,'installments_payments.csv'), dtype=install_dtype)
card_bal = pd.read_csv(os.path.join(default_dir, 'credit_card_balance.csv'), dtype=card_dtype)
return pos_bal, install, card_bal
pos_bal, install, card_bal = get_balance_data() | Home Credit Default Risk |
15,886,745 | y_predicted = np.where(predicted>0.5,1,0 )<prepare_output> | from sklearn.model_selection import train_test_split
from lightgbm import LGBMClassifier | Home Credit Default Risk |
15,886,745 | y_predictedd = y_predicted.reshape(( 1,3263)) [0]<feature_engineering> | def get_apps_processed(apps):
apps['APPS_EXT_SOURCE_MEAN'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis = 1)
apps['APPS_EXT_SOURCE_STD'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
apps['APPS_EXT_SOURCE_STD'] = apps['APPS_EXT_SOURCE_STD'].fillna(apps['APPS_EXT_SOURCE_STD'].mean())
apps['APPS_ANNUITY_CREDIT_RATIO'] = apps['AMT_ANNUITY']/apps['AMT_CREDIT']
apps['APPS_GOODS_CREDIT_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_CREDIT']
apps['APPS_ANNUITY_INCOME_RATIO'] = apps['AMT_ANNUITY']/apps['AMT_INCOME_TOTAL']
apps['APPS_CREDIT_INCOME_RATIO'] = apps['AMT_CREDIT']/apps['AMT_INCOME_TOTAL']
apps['APPS_GOODS_INCOME_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_INCOME_TOTAL']
apps['APPS_CNT_FAM_INCOME_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['CNT_FAM_MEMBERS']
apps['APPS_EMPLOYED_BIRTH_RATIO'] = apps['DAYS_EMPLOYED']/apps['DAYS_BIRTH']
apps['APPS_INCOME_EMPLOYED_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_EMPLOYED']
apps['APPS_INCOME_BIRTH_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_BIRTH']
apps['APPS_CAR_BIRTH_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_BIRTH']
apps['APPS_CAR_EMPLOYED_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_EMPLOYED']
return apps
def get_prev_processed(prev):
prev['PREV_CREDIT_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_CREDIT']
prev['PREV_GOODS_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_GOODS_PRICE']
prev['PREV_CREDIT_APPL_RATIO'] = prev['AMT_CREDIT']/prev['AMT_APPLICATION']
prev['PREV_GOODS_APPL_RATIO'] = prev['AMT_GOODS_PRICE']/prev['AMT_APPLICATION']
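# 365243 is Home Credit's sentinel for "no date"; converting it to NaN below keeps it
# out of the min/max/mean aggregations computed later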
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
prev['PREV_DAYS_LAST_DUE_DIFF'] = prev['DAYS_LAST_DUE_1ST_VERSION'] - prev['DAYS_LAST_DUE']
all_pay = prev['AMT_ANNUITY'] * prev['CNT_PAYMENT']
prev['PREV_INTERESTS_RATE'] =(all_pay/prev['AMT_CREDIT'] - 1)/prev['CNT_PAYMENT']
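# this approximates a flat per-period rate: total repaid over principal, minus 1,
# spread evenly across CNT_PAYMENT installments (compounding is ignored)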
return prev
def get_prev_amt_agg(prev):
agg_dict = {
'SK_ID_CURR':['count'],
'AMT_CREDIT':['mean', 'max', 'sum'],
'AMT_ANNUITY':['mean', 'max', 'sum'],
'AMT_APPLICATION':['mean', 'max', 'sum'],
'AMT_DOWN_PAYMENT':['mean', 'max', 'sum'],
'AMT_GOODS_PRICE':['mean', 'max', 'sum'],
'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
'PREV_CREDIT_DIFF':['mean', 'max', 'sum'],
'PREV_CREDIT_APPL_RATIO':['mean', 'max'],
'PREV_GOODS_DIFF':['mean', 'max', 'sum'],
'PREV_GOODS_APPL_RATIO':['mean', 'max'],
'PREV_DAYS_LAST_DUE_DIFF':['mean', 'max', 'sum'],
'PREV_INTERESTS_RATE':['mean', 'max']
}
prev_group = prev.groupby('SK_ID_CURR')
prev_amt_agg = prev_group.agg(agg_dict)
prev_amt_agg.columns = ["PREV_"+ "_".join(x ).upper() for x in prev_amt_agg.columns.ravel() ]
return prev_amt_agg
def get_prev_refused_appr_agg(prev):
prev_refused_appr_group = prev[prev['NAME_CONTRACT_STATUS'].isin(['Approved', 'Refused'])].groupby([ 'SK_ID_CURR', 'NAME_CONTRACT_STATUS'])
prev_refused_appr_agg = prev_refused_appr_group['SK_ID_CURR'].count().unstack()
prev_refused_appr_agg.columns = ['PREV_APPROVED_COUNT', 'PREV_REFUSED_COUNT' ]
prev_refused_appr_agg = prev_refused_appr_agg.fillna(0)
return prev_refused_appr_agg
def get_prev_days365_agg(prev):
cond_days365 = prev['DAYS_DECISION'] > -365
prev_days365_group = prev[cond_days365].groupby('SK_ID_CURR')
agg_dict = {
'SK_ID_CURR':['count'],
'AMT_CREDIT':['mean', 'max', 'sum'],
'AMT_ANNUITY':['mean', 'max', 'sum'],
'AMT_APPLICATION':['mean', 'max', 'sum'],
'AMT_DOWN_PAYMENT':['mean', 'max', 'sum'],
'AMT_GOODS_PRICE':['mean', 'max', 'sum'],
'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
'PREV_CREDIT_DIFF':['mean', 'max', 'sum'],
'PREV_CREDIT_APPL_RATIO':['mean', 'max'],
'PREV_GOODS_DIFF':['mean', 'max', 'sum'],
'PREV_GOODS_APPL_RATIO':['mean', 'max'],
'PREV_DAYS_LAST_DUE_DIFF':['mean', 'max', 'sum'],
'PREV_INTERESTS_RATE':['mean', 'max']
}
prev_days365_agg = prev_days365_group.agg(agg_dict)
prev_days365_agg.columns = ["PREV_D365_"+ "_".join(x ).upper() for x in prev_days365_agg.columns.ravel() ]
return prev_days365_agg
def get_prev_agg(prev):
prev = get_prev_processed(prev)
prev_amt_agg = get_prev_amt_agg(prev)
prev_refused_appr_agg = get_prev_refused_appr_agg(prev)
prev_days365_agg = get_prev_days365_agg(prev)
prev_agg = prev_amt_agg.merge(prev_refused_appr_agg, on='SK_ID_CURR', how='left')
prev_agg = prev_agg.merge(prev_days365_agg, on='SK_ID_CURR', how='left')
prev_agg['PREV_REFUSED_RATIO'] = prev_agg['PREV_REFUSED_COUNT']/prev_agg['PREV_SK_ID_CURR_COUNT']
prev_agg['PREV_APPROVED_RATIO'] = prev_agg['PREV_APPROVED_COUNT']/prev_agg['PREV_SK_ID_CURR_COUNT']
prev_agg = prev_agg.drop(['PREV_REFUSED_COUNT', 'PREV_APPROVED_COUNT'], axis=1)
return prev_agg
def get_bureau_processed(bureau):
bureau['BUREAU_ENDDATE_FACT_DIFF'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT']
bureau['BUREAU_CREDIT_FACT_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_ENDDATE_FACT']
bureau['BUREAU_CREDIT_ENDDATE_DIFF'] = bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']
bureau['BUREAU_CREDIT_DEBT_RATIO']=bureau['AMT_CREDIT_SUM_DEBT']/bureau['AMT_CREDIT_SUM']
bureau['BUREAU_CREDIT_DEBT_DIFF'] = bureau['AMT_CREDIT_SUM_DEBT'] - bureau['AMT_CREDIT_SUM']
bureau['BUREAU_IS_DPD'] = bureau['CREDIT_DAY_OVERDUE'].apply(lambda x: 1 if x > 0 else 0)
bureau['BUREAU_IS_DPD_OVER120'] = bureau['CREDIT_DAY_OVERDUE'].apply(lambda x: 1 if x >120 else 0)
return bureau
def get_bureau_day_amt_agg(bureau):
bureau_agg_dict = {
'SK_ID_BUREAU':['count'],
'DAYS_CREDIT':['min', 'max', 'mean'],
'CREDIT_DAY_OVERDUE':['min', 'max', 'mean'],
'DAYS_CREDIT_ENDDATE':['min', 'max', 'mean'],
'DAYS_ENDDATE_FACT':['min', 'max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['max', 'mean', 'sum'],
'AMT_ANNUITY': ['max', 'mean', 'sum'],
'BUREAU_ENDDATE_FACT_DIFF':['min', 'max', 'mean'],
'BUREAU_CREDIT_FACT_DIFF':['min', 'max', 'mean'],
'BUREAU_CREDIT_ENDDATE_DIFF':['min', 'max', 'mean'],
'BUREAU_CREDIT_DEBT_RATIO':['min', 'max', 'mean'],
'BUREAU_CREDIT_DEBT_DIFF':['min', 'max', 'mean'],
'BUREAU_IS_DPD':['mean', 'sum'],
'BUREAU_IS_DPD_OVER120':['mean', 'sum']
}
bureau_grp = bureau.groupby('SK_ID_CURR')
bureau_day_amt_agg = bureau_grp.agg(bureau_agg_dict)
bureau_day_amt_agg.columns = ['BUREAU_'+('_' ).join(column ).upper() for column in bureau_day_amt_agg.columns.ravel() ]
bureau_day_amt_agg = bureau_day_amt_agg.reset_index()
return bureau_day_amt_agg
def get_bureau_active_agg(bureau):
cond_active = bureau['CREDIT_ACTIVE'] == 'Active'
bureau_active_grp = bureau[cond_active].groupby(['SK_ID_CURR'])
bureau_agg_dict = {
'SK_ID_BUREAU':['count'],
'DAYS_CREDIT':['min', 'max', 'mean'],
'CREDIT_DAY_OVERDUE':['min', 'max', 'mean'],
'DAYS_CREDIT_ENDDATE':['min', 'max', 'mean'],
'DAYS_ENDDATE_FACT':['min', 'max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['max', 'mean', 'sum'],
'AMT_ANNUITY': ['max', 'mean', 'sum'],
'BUREAU_ENDDATE_FACT_DIFF':['min', 'max', 'mean'],
'BUREAU_CREDIT_FACT_DIFF':['min', 'max', 'mean'],
'BUREAU_CREDIT_ENDDATE_DIFF':['min', 'max', 'mean'],
'BUREAU_CREDIT_DEBT_RATIO':['min', 'max', 'mean'],
'BUREAU_CREDIT_DEBT_DIFF':['min', 'max', 'mean'],
'BUREAU_IS_DPD':['mean', 'sum'],
'BUREAU_IS_DPD_OVER120':['mean', 'sum']
}
bureau_active_agg = bureau_active_grp.agg(bureau_agg_dict)
bureau_active_agg.columns = ['BUREAU_ACT_'+('_' ).join(column ).upper() for column in bureau_active_agg.columns.ravel() ]
bureau_active_agg = bureau_active_agg.reset_index()
return bureau_active_agg
def get_bureau_days750_agg(bureau):
cond_days750 = bureau['DAYS_CREDIT'] > -750
bureau_days750_group = bureau[cond_days750].groupby('SK_ID_CURR')
bureau_agg_dict = {
'SK_ID_BUREAU':['count'],
'DAYS_CREDIT':['min', 'max', 'mean'],
'CREDIT_DAY_OVERDUE':['min', 'max', 'mean'],
'DAYS_CREDIT_ENDDATE':['min', 'max', 'mean'],
'DAYS_ENDDATE_FACT':['min', 'max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['max', 'mean', 'sum'],
'AMT_ANNUITY': ['max', 'mean', 'sum'],
'BUREAU_ENDDATE_FACT_DIFF':['min', 'max', 'mean'],
'BUREAU_CREDIT_FACT_DIFF':['min', 'max', 'mean'],
'BUREAU_CREDIT_ENDDATE_DIFF':['min', 'max', 'mean'],
'BUREAU_CREDIT_DEBT_RATIO':['min', 'max', 'mean'],
'BUREAU_CREDIT_DEBT_DIFF':['min', 'max', 'mean'],
'BUREAU_IS_DPD':['mean', 'sum'],
'BUREAU_IS_DPD_OVER120':['mean', 'sum']
}
bureau_days750_agg = bureau_days750_group.agg(bureau_agg_dict)
bureau_days750_agg.columns = ['BUREAU_ACT_'+('_' ).join(column ).upper() for column in bureau_days750_agg.columns.ravel() ]
bureau_days750_agg = bureau_days750_agg.reset_index()
return bureau_days750_agg
def get_bureau_bal_agg(bureau, bureau_bal):
bureau_bal = bureau_bal.merge(bureau[['SK_ID_CURR', 'SK_ID_BUREAU']], on='SK_ID_BUREAU', how='left')
bureau_bal['BUREAU_BAL_IS_DPD'] = bureau_bal['STATUS'].apply(lambda x: 1 if x in['1','2','3','4','5'] else 0)
bureau_bal['BUREAU_BAL_IS_DPD_OVER120'] = bureau_bal['STATUS'].apply(lambda x: 1 if x =='5' else 0)
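# bureau_balance STATUS codes: 'C' closed, 'X' unknown, '0' current, '1'-'5' DPD buckets,
# with '5' meaning 120+ days past due or written off - hence the two flags above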
bureau_bal_grp = bureau_bal.groupby('SK_ID_CURR')
bureau_bal_agg_dict = {
'SK_ID_CURR':['count'],
'MONTHS_BALANCE':['min', 'max', 'mean'],
'BUREAU_BAL_IS_DPD':['mean', 'sum'],
'BUREAU_BAL_IS_DPD_OVER120':['mean', 'sum']
}
bureau_bal_agg = bureau_bal_grp.agg(bureau_bal_agg_dict)
bureau_bal_agg.columns = [ 'BUREAU_BAL_'+('_' ).join(column ).upper() for column in bureau_bal_agg.columns.ravel() ]
bureau_bal_agg = bureau_bal_agg.reset_index()
return bureau_bal_agg
def get_bureau_agg(bureau, bureau_bal):
bureau = get_bureau_processed(bureau)
bureau_day_amt_agg = get_bureau_day_amt_agg(bureau)
bureau_active_agg = get_bureau_active_agg(bureau)
bureau_days750_agg = get_bureau_days750_agg(bureau)
bureau_bal_agg = get_bureau_bal_agg(bureau, bureau_bal)
bureau_agg = bureau_day_amt_agg.merge(bureau_active_agg, on='SK_ID_CURR', how='left')
bureau_agg['BUREAU_ACT_IS_DPD_RATIO'] = bureau_agg['BUREAU_ACT_BUREAU_IS_DPD_SUM']/bureau_agg['BUREAU_SK_ID_BUREAU_COUNT']
bureau_agg['BUREAU_ACT_IS_DPD_OVER120_RATIO'] = bureau_agg['BUREAU_ACT_BUREAU_IS_DPD_OVER120_SUM']/bureau_agg['BUREAU_SK_ID_BUREAU_COUNT']
bureau_agg = bureau_agg.merge(bureau_bal_agg, on='SK_ID_CURR', how='left')
bureau_agg = bureau_agg.merge(bureau_days750_agg, on='SK_ID_CURR', how='left')
return bureau_agg
def get_apps_all_with_prev_agg(apps, prev):
apps_all = get_apps_processed(apps)
prev_agg = get_prev_agg(prev)
print('prev_agg shape:', prev_agg.shape)
print('apps_all before merge shape:', apps_all.shape)
apps_all = apps_all.merge(prev_agg, on='SK_ID_CURR', how='left')
print('apps_all after merge with prev_agg shape:', apps_all.shape)
return apps_all
def get_apps_all_encoded(apps_all):
object_columns = apps_all.dtypes[apps_all.dtypes == 'object'].index.tolist()
for column in object_columns:
apps_all[column] = pd.factorize(apps_all[column])[0]
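# pd.factorize label-encodes each object column with arbitrary integer codes, which is
# fine for the tree-based LightGBM model used below (no ordinal meaning is implied)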
return apps_all
def get_apps_all_train_test(apps_all):
apps_all_train = apps_all[~apps_all['TARGET'].isnull() ]
apps_all_test = apps_all[apps_all['TARGET'].isnull() ]
apps_all_test = apps_all_test.drop('TARGET', axis=1)
return apps_all_train, apps_all_test
def train_apps_all(apps_all_train):
ftr_app = apps_all_train.drop(['SK_ID_CURR', 'TARGET'], axis=1)
target_app = apps_all_train['TARGET']
train_x, valid_x, train_y, valid_y = train_test_split(ftr_app, target_app, test_size=0.3, random_state=2020)
print('train shape:', train_x.shape, 'valid shape:', valid_x.shape)
clf = LGBMClassifier(
nthread=4,
n_estimators=2000,
learning_rate=0.02,
max_depth = 11,
num_leaves=58,
colsample_bytree=0.613,
subsample=0.708,
max_bin=407,
reg_alpha=3.564,
reg_lambda=4.930,
min_child_weight= 6,
min_child_samples=165,
silent=-1,
verbose=-1,
)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)], eval_metric= 'auc', verbose= 100,
early_stopping_rounds= 200)
return clf | Home Credit Default Risk |
15,886,745 | sample['id'] = test.id
sample['target'] = y_predictedd<save_to_csv> | def get_pos_bal_agg(pos_bal):
cond_over_0 = pos_bal['SK_DPD'] > 0
cond_100 =(pos_bal['SK_DPD'] < 100)&(pos_bal['SK_DPD'] > 0)
cond_over_100 =(pos_bal['SK_DPD'] >= 100)
pos_bal['POS_IS_DPD'] = pos_bal['SK_DPD'].apply(lambda x: 1 if x > 0 else 0)
pos_bal['POS_IS_DPD_UNDER_120'] = pos_bal['SK_DPD'].apply(lambda x:1 if(x > 0)&(x <120)else 0)
pos_bal['POS_IS_DPD_OVER_120'] = pos_bal['SK_DPD'].apply(lambda x:1 if x >= 120 else 0)
pos_bal_grp = pos_bal.groupby('SK_ID_CURR')
pos_bal_agg_dict = {
'SK_ID_CURR':['count'],
'MONTHS_BALANCE':['min', 'mean', 'max'],
'SK_DPD':['min', 'max', 'mean', 'sum'],
'CNT_INSTALMENT':['min', 'max', 'mean', 'sum'],
'CNT_INSTALMENT_FUTURE':['min', 'max', 'mean', 'sum'],
'POS_IS_DPD':['mean', 'sum'],
'POS_IS_DPD_UNDER_120':['mean', 'sum'],
'POS_IS_DPD_OVER_120':['mean', 'sum']
}
pos_bal_agg = pos_bal_grp.agg(pos_bal_agg_dict)
pos_bal_agg.columns = [('POS_')+('_' ).join(column ).upper() for column in pos_bal_agg.columns.ravel() ]
cond_months = pos_bal['MONTHS_BALANCE'] > -20
pos_bal_m20_grp = pos_bal[cond_months].groupby('SK_ID_CURR')
pos_bal_m20_agg_dict = {
'SK_ID_CURR':['count'],
'MONTHS_BALANCE':['min', 'mean', 'max'],
'SK_DPD':['min', 'max', 'mean', 'sum'],
'CNT_INSTALMENT':['min', 'max', 'mean', 'sum'],
'CNT_INSTALMENT_FUTURE':['min', 'max', 'mean', 'sum'],
'POS_IS_DPD':['mean', 'sum'],
'POS_IS_DPD_UNDER_120':['mean', 'sum'],
'POS_IS_DPD_OVER_120':['mean', 'sum']
}
pos_bal_m20_agg = pos_bal_m20_grp.agg(pos_bal_m20_agg_dict)
pos_bal_m20_agg.columns = [('POS_M20')+('_' ).join(column ).upper() for column in pos_bal_m20_agg.columns.ravel() ]
pos_bal_agg = pos_bal_agg.merge(pos_bal_m20_agg, on='SK_ID_CURR', how='left')
pos_bal_agg = pos_bal_agg.reset_index()
return pos_bal_agg
def get_install_agg(install):
install['AMT_DIFF'] = install['AMT_INSTALMENT'] - install['AMT_PAYMENT']
install['AMT_RATIO'] =(install['AMT_PAYMENT'] +1)/(install['AMT_INSTALMENT'] + 1)
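# the +1 on numerator and denominator is additive smoothing, guarding against
# division by zero on zero-amount installments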
install['SK_DPD'] = install['DAYS_ENTRY_PAYMENT'] - install['DAYS_INSTALMENT']
install['INS_IS_DPD'] = install['SK_DPD'].apply(lambda x: 1 if x > 0 else 0)
install['INS_IS_DPD_UNDER_120'] = install['SK_DPD'].apply(lambda x:1 if(x > 0)&(x <120)else 0)
install['INS_IS_DPD_OVER_120'] = install['SK_DPD'].apply(lambda x:1 if x >= 120 else 0)
install_grp = install.groupby('SK_ID_CURR')
install_agg_dict = {
'SK_ID_CURR':['count'],
'NUM_INSTALMENT_VERSION':['nunique'],
'DAYS_ENTRY_PAYMENT':['mean', 'max', 'sum'],
'DAYS_INSTALMENT':['mean', 'max', 'sum'],
'AMT_INSTALMENT':['mean', 'max', 'sum'],
'AMT_PAYMENT':['mean', 'max','sum'],
'AMT_DIFF':['mean','min', 'max','sum'],
'AMT_RATIO':['mean', 'max'],
'SK_DPD':['mean', 'min', 'max'],
'INS_IS_DPD':['mean', 'sum'],
'INS_IS_DPD_UNDER_120':['mean', 'sum'],
'INS_IS_DPD_OVER_120':['mean', 'sum']
}
install_agg = install_grp.agg(install_agg_dict)
install_agg.columns = ['INS_'+('_' ).join(column ).upper() for column in install_agg.columns.ravel() ]
cond_day = install['DAYS_ENTRY_PAYMENT'] >= -365
install_d365_grp = install[cond_day].groupby('SK_ID_CURR')
install_d365_agg_dict = {
'SK_ID_CURR':['count'],
'NUM_INSTALMENT_VERSION':['nunique'],
'DAYS_ENTRY_PAYMENT':['mean', 'max', 'sum'],
'DAYS_INSTALMENT':['mean', 'max', 'sum'],
'AMT_INSTALMENT':['mean', 'max', 'sum'],
'AMT_PAYMENT':['mean', 'max','sum'],
'AMT_DIFF':['mean','min', 'max','sum'],
'AMT_RATIO':['mean', 'max'],
'SK_DPD':['mean', 'min', 'max'],
'INS_IS_DPD':['mean', 'sum'],
'INS_IS_DPD_UNDER_120':['mean', 'sum'],
'INS_IS_DPD_OVER_120':['mean', 'sum']
}
install_d365_agg = install_d365_grp.agg(install_d365_agg_dict)
install_d365_agg.columns = ['INS_D365'+('_' ).join(column ).upper() for column in install_d365_agg.columns.ravel() ]
install_agg = install_agg.merge(install_d365_agg, on='SK_ID_CURR', how='left')
install_agg = install_agg.reset_index()
return install_agg
def get_card_bal_agg(card_bal):
card_bal['BALANCE_LIMIT_RATIO'] = card_bal['AMT_BALANCE']/card_bal['AMT_CREDIT_LIMIT_ACTUAL']
card_bal['DRAWING_LIMIT_RATIO'] = card_bal['AMT_DRAWINGS_CURRENT'] / card_bal['AMT_CREDIT_LIMIT_ACTUAL']
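# AMT_CREDIT_LIMIT_ACTUAL can be 0, so these two ratios can yield inf; replacing
# inf with NaN before modeling is a common safeguard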
card_bal['CARD_IS_DPD'] = card_bal['SK_DPD'].apply(lambda x: 1 if x > 0 else 0)
card_bal['CARD_IS_DPD_UNDER_120'] = card_bal['SK_DPD'].apply(lambda x:1 if(x > 0)&(x <120)else 0)
card_bal['CARD_IS_DPD_OVER_120'] = card_bal['SK_DPD'].apply(lambda x:1 if x >= 120 else 0)
card_bal_grp = card_bal.groupby('SK_ID_CURR')
card_bal_agg_dict = {
'SK_ID_CURR':['count'],
'AMT_BALANCE':['max'],
'AMT_CREDIT_LIMIT_ACTUAL':['max'],
'AMT_DRAWINGS_ATM_CURRENT': ['max', 'sum'],
'AMT_DRAWINGS_CURRENT': ['max', 'sum'],
'AMT_DRAWINGS_POS_CURRENT': ['max', 'sum'],
'AMT_INST_MIN_REGULARITY': ['max', 'mean'],
'AMT_PAYMENT_TOTAL_CURRENT': ['max','sum'],
'AMT_TOTAL_RECEIVABLE': ['max', 'mean'],
'CNT_DRAWINGS_ATM_CURRENT': ['max','sum'],
'CNT_DRAWINGS_CURRENT': ['max', 'mean', 'sum'],
'CNT_DRAWINGS_POS_CURRENT': ['mean'],
'SK_DPD': ['mean', 'max', 'sum'],
'BALANCE_LIMIT_RATIO':['min','max'],
'DRAWING_LIMIT_RATIO':['min', 'max'],
'CARD_IS_DPD':['mean', 'sum'],
'CARD_IS_DPD_UNDER_120':['mean', 'sum'],
'CARD_IS_DPD_OVER_120':['mean', 'sum']
}
card_bal_agg = card_bal_grp.agg(card_bal_agg_dict)
card_bal_agg.columns = ['CARD_'+('_' ).join(column ).upper() for column in card_bal_agg.columns.ravel() ]
card_bal_agg = card_bal_agg.reset_index()
cond_month = card_bal.MONTHS_BALANCE >= -3
card_bal_m3_grp = card_bal[cond_month].groupby('SK_ID_CURR')
card_bal_m3_agg = card_bal_m3_grp.agg(card_bal_agg_dict)
card_bal_m3_agg.columns = ['CARD_M3'+('_' ).join(column ).upper() for column in card_bal_m3_agg.columns.ravel() ]
card_bal_agg = card_bal_agg.merge(card_bal_m3_agg, on='SK_ID_CURR', how='left')
card_bal_agg = card_bal_agg.reset_index()
return card_bal_agg | Home Credit Default Risk |
15,886,745 | sample.to_csv('submission_a.csv',index = False )<load_from_csv> | def get_apps_all_with_all_agg(apps, prev, bureau, bureau_bal, pos_bal, install, card_bal):
apps_all = get_apps_processed(apps)
prev_agg = get_prev_agg(prev)
bureau_agg = get_bureau_agg(bureau, bureau_bal)
pos_bal_agg = get_pos_bal_agg(pos_bal)
install_agg = get_install_agg(install)
card_bal_agg = get_card_bal_agg(card_bal)
print('prev_agg shape:', prev_agg.shape, 'bureau_agg shape:', bureau_agg.shape)
print('pos_bal_agg shape:', pos_bal_agg.shape, 'install_agg shape:', install_agg.shape, 'card_bal_agg shape:', card_bal_agg.shape)
print('apps_all before merge shape:', apps_all.shape)
apps_all = apps_all.merge(prev_agg, on='SK_ID_CURR', how='left')
apps_all = apps_all.merge(bureau_agg, on='SK_ID_CURR', how='left')
apps_all = apps_all.merge(pos_bal_agg, on='SK_ID_CURR', how='left')
apps_all = apps_all.merge(install_agg, on='SK_ID_CURR', how='left')
apps_all = apps_all.merge(card_bal_agg, on='SK_ID_CURR', how='left')
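# left joins keep every application row; customers absent from a source table simply
# get NaNs in its columns, which LightGBM handles natively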
print('apps_all after merge with all shape:', apps_all.shape)
return apps_all | Home Credit Default Risk |
15,886,745 | df = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv", na_filter=False)
df.head()<categorify> | def get_dataset() :
app_train = pd.read_csv(os.path.join(default_dir,'application_train.csv'))
app_test = pd.read_csv(os.path.join(default_dir,'application_test.csv'))
apps = pd.concat([app_train, app_test])
prev = pd.read_csv(os.path.join(default_dir,'previous_application.csv'))
bureau = pd.read_csv(os.path.join(default_dir,'bureau.csv'))
bureau_bal = pd.read_csv(os.path.join(default_dir,'bureau_balance.csv'))
pos_bal, install, card_bal = get_balance_data()
return apps, prev, bureau, bureau_bal, pos_bal, install, card_bal | Home Credit Default Risk |
15,886,745 | nlp = spacy.load("en_core_web_sm")
def preprocess(text):
doc = nlp(text)
token_semstop = [word for word in doc if not word.is_stop and word.text != ' ']  # second filter truncated in the source; a whitespace filter is assumed
text = ' '.join(token.lower_ for token in token_semstop)
text = re.sub(r'(@\w+|https?:\S+)', '', text)
text = text.replace(r'&?', r'and')
text = re.sub(r'(>|<)', '', text)
text = text.encode(encoding="ascii", errors="ignore" ).decode()
return text
<feature_engineering> | apps, prev, bureau, bureau_bal, pos_bal, install, card_bal = get_dataset() | Home Credit Default Risk |
15,886,745 | train_df['text'] = train_df['text'].apply(preprocess)
train_df.head()<load_from_csv> | apps_all = get_apps_all_with_all_agg(apps, prev, bureau, bureau_bal, pos_bal, install, card_bal)
apps_all = get_apps_all_encoded(apps_all)
apps_all_train, apps_all_test = get_apps_all_train_test(apps_all)
clf = train_apps_all(apps_all_train ) | Home Credit Default Risk |
15,886,745 | second_df = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv", na_filter=False)
second_df.head()
test_df = second_df[['text']].copy()
test_df['text'] = test_df['text'].apply(preprocess)
<normalization> | output_dir = ".. /output/kaggle/working/"
preds = clf.predict_proba(apps_all_test.drop(['SK_ID_CURR'], axis=1)) [:, 1 ]
apps_all_test['TARGET'] = preds
apps_all_test[['SK_ID_CURR', 'TARGET']] | Home Credit Default Risk |
15,886,745 | vectorizer = TfidfVectorizer(use_idf=True, ngram_range=(1,2), preprocessor=preprocess)
tfidf_data = vectorizer.fit_transform(train_df['text'])
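# note: preprocess() was already applied to train_df['text'] above and runs again inside
# the vectorizer via preprocessor=; redundant but harmless since it is roughly idempotent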
<find_best_model_class> | apps_all_test[['SK_ID_CURR', 'TARGET']].to_csv('submission.csv', index=False ) | Home Credit Default Risk |
15,886,745 | <load_from_csv><EOS> | from lightgbm import plot_importance | Home Credit Default Risk |
22,248,594 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<save_to_csv> | import numpy as np
import pandas as pd
import joblib
import gc | Home Credit Default Risk |
22,248,594 | submission['target'] = test_df['target']
submission.to_csv("sample_submission.csv", index=False)
submission.head()<install_modules> | test = pd.read_csv('.. /input/home-credit-default-risk/application_test.csv')
test.set_index(['SK_ID_CURR'], inplace=True)
test.shape | Home Credit Default Risk |
22,248,594 | !pip install --user catboost
<load_from_csv> | preprocessor = joblib.load('.. /input/wk6-default/wk6default_preprocessor.joblib')
LGBM_model = joblib.load('.. /input/wk6-default/wk6_LGBM_default_model.joblib' ) | Home Credit Default Risk |
22,248,594 | train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv' )<count_missing_values> | bureau_bal = pd.read_csv('.. /input/home-credit-default-risk/bureau_balance.csv')
bureau = pd.read_csv('.. /input/home-credit-default-risk/bureau.csv')
bb = pd.merge(bureau, bureau_bal, on = 'SK_ID_BUREAU', how = 'left')
bb['REMAIN_CRED'] = bb['AMT_CREDIT_SUM'] - bb['AMT_CREDIT_SUM_DEBT'] - bb['AMT_CREDIT_SUM_LIMIT']
bb['AC_RATIO'] = bb['AMT_ANNUITY'] / bb['AMT_CREDIT_SUM']
bb.columns = ['BU_'+column if column !=('SK_ID_CURR')
else column for column in bb.columns]
bur_cat = pd.get_dummies(bb.select_dtypes('object'))
bur_cat['SK_ID_CURR'] = bb['SK_ID_CURR']
bur_cat = bur_cat.groupby(by = ['SK_ID_CURR'] ).agg(['mean'])
bur_num = bb.groupby(by = ['SK_ID_CURR'] ).agg(['max', 'mean', 'sum'] ).astype('float32')
bureau_rev = bur_cat.merge(bur_num, on = ['SK_ID_CURR'], how = 'left')
test = test.merge(bureau_rev, on = ['SK_ID_CURR'], how = 'left')
del bur_cat
del bur_num
del bureau
del bureau_bal
gc.collect() | Home Credit Default Risk |
22,248,594 | print(train.isnull().sum())
test.isnull().sum()<prepare_x_and_y> | cc_bal = pd.read_csv('.. /input/home-credit-default-risk/credit_card_balance.csv')
cc_bal['DRAW_RATIO'] = cc_bal['AMT_DRAWINGS_CURRENT'] / cc_bal['CNT_DRAWINGS_CURRENT']
cc_bal['RECEIVE_RATIO'] = cc_bal['AMT_RECIVABLE'] / cc_bal['AMT_RECEIVABLE_PRINCIPAL']
cc_bal['RECEIVE_PER'] = cc_bal['AMT_RECIVABLE'] / cc_bal['AMT_TOTAL_RECEIVABLE']
cc_bal.columns = ['CC_'+ column if column !='SK_ID_CURR'
else column for column in cc_bal.columns]
cc_cat = pd.get_dummies(cc_bal.select_dtypes('object'))
cc_cat['SK_ID_CURR'] = cc_bal['SK_ID_CURR']
cc_cat = cc_cat.groupby(by = ['SK_ID_CURR'] ).mean()
cc_num = cc_bal.groupby(by = ['SK_ID_CURR'] ).agg(['max', 'mean', 'sum'] ).astype('float32')
test = test.merge(cc_cat, on = ['SK_ID_CURR'], how = 'left')
test = test.merge(cc_num, on = ['SK_ID_CURR'], how = 'left')
del cc_bal
del cc_cat
del cc_num
gc.collect() | Home Credit Default Risk |
22,248,594 | df = train.drop(columns=['ACTION'])
train_x = train.drop(columns=['ACTION'])
train_y = train['ACTION']
test_x = test.drop(columns=['id'])
<split> | install = pd.read_csv('.. /input/home-credit-default-risk/installments_payments.csv')
install['PAY_PERCENT'] = install['AMT_INSTALMENT'] / install['AMT_PAYMENT']
install['PAY_DIFF'] = install['AMT_INSTALMENT'] - install['AMT_PAYMENT']
install['DPD'] = install['DAYS_ENTRY_PAYMENT'] - install['DAYS_INSTALMENT']
install['DPD'] = install['DPD'].apply(lambda x: x if x>0 else 0)
install['DBD'] = install['DAYS_INSTALMENT'] - install['DAYS_ENTRY_PAYMENT']
install['DBD'] = install['DBD'].apply(lambda x: x if x>0 else 0)
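# DPD = days past due (paid late), DBD = days before due (paid early); both floored at 0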
install.columns = ['IP_'+ column if column !='SK_ID_CURR'
else column for column in install.columns]
inst_num = install.groupby(by = ['SK_ID_CURR'] ).agg(['max', 'mean'] ).astype('float32')
test = test.merge(inst_num, on = 'SK_ID_CURR', how='left')
del install
del inst_num
gc.collect() | Home Credit Default Risk |
22,248,594 | X_train, X_test, y_train, y_test = train_test_split(train_x, train_y )<choose_model_class> | pos = pd.read_csv('../input/home-credit-default-risk/POS_CASH_balance.csv')
pos.columns = ['PC_'+ column if column !='SK_ID_CURR'
else column for column in pos.columns]
pos_num = pos.groupby(by = ['SK_ID_CURR'] ).agg(['max', 'mean', 'sum'] ).astype('float32')
test = test.merge(pos_num, on = ['SK_ID_CURR'], how = 'left')
del pos
del pos_num
gc.collect()
| Home Credit Default Risk |
22,248,594 | model = lm.LogisticRegression()
model.fit(X_train, y_train)
predictions = model.predict_proba(test_x)
<prepare_x_and_y> | prev = pd.read_csv('.. /input/home-credit-default-risk/previous_application.csv')
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace = True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace = True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace = True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace = True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace = True)
prev['AppCred_RATIO'] = prev['AMT_APPLICATION'] /(prev['AMT_CREDIT'] + 1)
prev['AppGoods_RATIO'] = prev['AMT_APPLICATION'] /(prev['AMT_GOODS_PRICE'] + 1)
prev['AnnCred_RATIO'] = prev['AMT_ANNUITY'] /(prev['AMT_CREDIT'] + 1)
prev['CredGoods_RATIO'] = prev['AMT_CREDIT'] /(prev['AMT_GOODS_PRICE'] + 1)
def calc_rate(row):
return np.rate(row['CNT_PAYMENT'], -row['AMT_ANNUITY'], row['AMT_CREDIT'], 0, guess = 0.05, maxiter = 10)
prev['CALC_RATE'] = prev.apply(calc_rate, axis=1)
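# np.rate solves for the per-period interest rate of an annuity; it was removed in
# NumPy 1.20+, so on modern stacks numpy_financial.rate is the drop-in replacement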
p_dels = ['RATE_INTEREST_PRIMARY','RATE_INTEREST_PRIVILEGED']
prev = prev.drop(prev[p_dels], axis = 1)
prev.columns = ['PR_'+ column if column != 'SK_ID_CURR'
else column for column in prev.columns]
prev_cat = pd.get_dummies(prev.select_dtypes('object'))
prev_cat['SK_ID_CURR'] = prev['SK_ID_CURR']
prev_cat = prev_cat.groupby(by = ['SK_ID_CURR'] ).agg(['mean'])
prev_num = prev.groupby(by = ['SK_ID_CURR'] ).agg(['max', 'mean', 'sum'] ).astype('float32')
prev_rev = prev_num.merge(prev_cat, on = ['SK_ID_CURR'], how = 'left')
test = test.merge(prev_rev, on = ['SK_ID_CURR'], how = 'left')
del prev_rev
del prev_cat
del prev_num
gc.collect() | Home Credit Default Risk |
22,248,594 | print(f"{X_train.shape}, {X_test.shape}, {test.drop(columns=['id'] ).shape}")
test_x = test.drop(columns=['id'])
X_train.reset_index(drop=True, inplace=True)
y_train.reset_index(drop=True, inplace=True )<predict_on_test> | test['DAYS_EMPLOYED'].replace(365243, np.nan, inplace = True)
test['AGE'] = test['DAYS_BIRTH'] / - 365
test['AVG_EXT'] = test.iloc[:, 41:44].sum(axis=1)/(3- test.iloc[:,41:44].isnull().sum(axis=1))
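# AVG_EXT is the mean over the non-null EXT_SOURCE_1..3; note iloc[:, 41:44] assumes
# those columns sit at fixed positions - selecting them by name would be more robust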
test.EXT_SOURCE_1.fillna(test.AVG_EXT, inplace=True)
test.EXT_SOURCE_2.fillna(test.AVG_EXT, inplace=True)
test.EXT_SOURCE_3.fillna(test.AVG_EXT, inplace=True ) | Home Credit Default Risk |
22,248,594 | model = naive_bayes.CategoricalNB()
<train_model> | test['EmpAge_RATIO'] = test['DAYS_EMPLOYED'] / test['AGE']
test['CredInc_RATIO'] = test['AMT_CREDIT'] / test['AMT_INCOME_TOTAL']
test['AnnInc_RATIO'] = test['AMT_ANNUITY'] / test['AMT_INCOME_TOTAL']
test['AnnCred_RATIO'] = test['AMT_ANNUITY'] /(test['AMT_CREDIT'] + 1)
test['CredGoods_RATIO'] = test['AMT_CREDIT'] /(test['AMT_GOODS_PRICE'] + 1)
test['AVG_EXT_INCOME'] = test['AMT_INCOME_TOTAL'] * test['AVG_EXT']
test['AVG_EXT_GOODS'] = test['AMT_GOODS_PRICE'] * test['AVG_EXT'] | Home Credit Default Risk |
22,248,594 | model = DecisionTreeClassifier(max_depth=30)
clf = model.fit(X_train, y_train)
print(f'{clf.score(X_test,y_test)}')
predictions = clf.predict(test_x)
<train_model> | dels = ['APARTMENTS_MODE', 'BASEMENTAREA_MODE', 'YEARS_BEGINEXPLUATATION_MODE',
'YEARS_BUILD_MODE', 'COMMONAREA_MODE', 'ELEVATORS_MODE', 'ENTRANCES_MODE',
'FLOORSMAX_MODE', 'FLOORSMIN_MODE', 'LANDAREA_MODE', 'LIVINGAPARTMENTS_MODE',
'LIVINGAREA_MODE', 'NONLIVINGAPARTMENTS_MODE', 'NONLIVINGAREA_MODE',
'APARTMENTS_MEDI', 'BASEMENTAREA_MEDI', 'YEARS_BEGINEXPLUATATION_MEDI',
'YEARS_BUILD_MEDI', 'COMMONAREA_MEDI', 'ELEVATORS_MEDI', 'ENTRANCES_MEDI',
'FLOORSMAX_MEDI', 'FLOORSMIN_MEDI', 'LANDAREA_MEDI', 'LIVINGAPARTMENTS_MEDI',
'LIVINGAREA_MEDI', 'NONLIVINGAPARTMENTS_MEDI', 'NONLIVINGAREA_MEDI',
'FONDKAPREMONT_MODE', 'HOUSETYPE_MODE', 'TOTALAREA_MODE',
'WALLSMATERIAL_MODE', 'EMERGENCYSTATE_MODE', 'FLAG_DOCUMENT_2',
'FLAG_DOCUMENT_3', 'FLAG_DOCUMENT_4', 'FLAG_DOCUMENT_5',
'FLAG_DOCUMENT_6', 'FLAG_DOCUMENT_7', 'FLAG_DOCUMENT_8',
'FLAG_DOCUMENT_9', 'FLAG_DOCUMENT_10', 'FLAG_DOCUMENT_11',
'FLAG_DOCUMENT_12', 'FLAG_DOCUMENT_13', 'FLAG_DOCUMENT_14',
'FLAG_DOCUMENT_15', 'FLAG_DOCUMENT_16', 'FLAG_DOCUMENT_17',
'FLAG_DOCUMENT_18', 'FLAG_DOCUMENT_19', 'FLAG_DOCUMENT_20',
'FLAG_DOCUMENT_21', 'DAYS_BIRTH', 'LIVINGAPARTMENTS_AVG',
'LIVINGAREA_AVG', 'CNT_FAM_MEMBERS', 'OBS_30_CNT_SOCIAL_CIRCLE',
'OBS_60_CNT_SOCIAL_CIRCLE', 'ELEVATORS_AVG', 'AVG_EXT']
test = test.drop(test[dels], axis =1)
gc.collect() | Home Credit Default Risk |
22,248,594 | model = RandomForestClassifier(n_estimators = 300)
clf = model.fit(X_train, y_train)
print(f'{clf.score(X_test, y_test)}')
predictions = clf.predict_proba(test_x)
<define_variables> | test = test.replace([np.inf, -np.inf], np.nan ) | Home Credit Default Risk |
22,248,594 | feature_names = dict()
for column, name in enumerate(train):
if column == 0:
continue
feature_names[column - 1] = name
dataset_dir = './amazon'
os.makedirs(dataset_dir, exist_ok=True)  # create_cd below fails if the output directory is missing
create_cd(
label=0,
cat_features=list(range(1, train.columns.shape[0])) ,
feature_names=feature_names,
output_path=os.path.join(dataset_dir, 'train.cd')
)<prepare_x_and_y> | test_pred = LGBM_model.predict_proba(X_test)
print(test_pred.shape)
print(test_pred[:5] ) | Home Credit Default Risk |
22,248,594 | X = train.drop(columns=['ACTION'])
y = train.ACTION
cat_features = list(range(0, X.shape[1]))
print(cat_features )<define_variables> | submission = pd.read_csv('.. /input/home-credit-default-risk/sample_submission.csv')
submission.head(10 ) | Home Credit Default Risk |
22,248,594 | pool1 = Pool(data=X, label=y, cat_features=cat_features)
pool2 = Pool(
data=os.path.join('/kaggle/input/amazon-employee-access-challenge/', 'train.csv'),
delimiter=',',
column_description=os.path.join(dataset_dir, 'train.cd'),
has_header=True
)
pool3 = Pool(data=X, cat_features=cat_features)
X_prepared = X.values.astype(str ).astype(object)
pool4 = Pool(
data=FeaturesData(
cat_feature_data=X_prepared,
cat_feature_names=list(X)
),
label=y.values
)
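# pool1-pool4 are four equivalent ways to build a CatBoost Pool: from a DataFrame,
# from a CSV plus a column-description file, without labels, and from FeaturesData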
print('Dataset shape')
print('dataset 1:' + str(pool1.shape)+
'\ndataset 2:' + str(pool2.shape)+
'\ndataset 3:' + str(pool3.shape)+
'\ndataset 4: ' + str(pool4.shape))
print('\n')
print('Column names')
print('dataset 1:')
print(pool1.get_feature_names())
print('\ndataset 2:')
print(pool2.get_feature_names())
print('\ndataset 3:')
print(pool3.get_feature_names())
print('\ndataset 4:')
print(pool4.get_feature_names() )<split> | submission.TARGET = test_pred[:,1]
submission.head(10 ) | Home Credit Default Risk |
22,248,594 | <train_model><EOS> | submission.to_csv('default_submission_wk06.csv', index=False, header = True ) | Home Credit Default Risk |
22,046,560 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<feature_engineering> | MainDir = ".. /input/.. /input/home-credit-default-risk"
test = pd.read_csv(f'{MainDir}/application_test.csv' ) | Home Credit Default Risk |
22,046,560 | model.get_feature_importance(prettified=True )<init_hyperparams> | preprocessor = joblib.load('../input/defaultdata08/default_preprocessor_08.joblib')
model = joblib.load('../input/defaultdata08/default_model_08.joblib')
print(type(model)) | Home Credit Default Risk |
22,046,560 | params = {}
params['loss_function'] = 'Logloss'
params['iterations'] = 93
params['custom_loss'] = 'AUC'
params['random_seed'] = 63
params['learning_rate'] = 0.5
cv_data = cv(
params = params,
pool = Pool(X, label=y, cat_features=cat_features),
fold_count=5,
shuffle=True,
partition_random_seed=0,
stratified=False,
verbose=False
)<train_model> | bureau = pd.read_csv(f'{MainDir}/bureau.csv')
print(bureau.shape, "- shape of bureau table")
bureau_balance = pd.read_csv(f'{MainDir}/bureau_balance.csv')
bb_status = pd.crosstab(bureau_balance.SK_ID_BUREAU, bureau_balance.STATUS)
bb_status.columns = ['BB_'+column for column in bb_status.columns]
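# the crosstab yields one per-loan count column per STATUS code (BB_0..BB_5, BB_C, BB_X)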
bureau = bureau.merge(bb_status, left_on = 'SK_ID_BUREAU', right_on = 'SK_ID_BUREAU')
bureau = bureau.drop(['SK_ID_BUREAU'], axis = 1)
print(bureau.shape, "- shape of bureau table after merge")
bureau.columns = ['BU_'+column if column !='SK_ID_CURR' else column for column in bureau.columns]
bureau_num = bureau.groupby(by=['SK_ID_CURR'] ).mean().reset_index()
print(bureau_num.shape, "- shape of numeric features(incl index)")
bureau_cat = pd.get_dummies(bureau.select_dtypes('object'))
bureau_cat['SK_ID_CURR'] = bureau['SK_ID_CURR']
bureau_cat = bureau_cat.groupby(by = ['SK_ID_CURR'] ).mean().reset_index()
print(bureau_cat.shape, "- shape of categorical features(incl index)")
bureau_count = bureau.groupby(by = ['SK_ID_CURR'])['BU_CREDIT_ACTIVE'].count().reset_index()
bureau_count = bureau_count.rename(columns={'BU_CREDIT_ACTIVE':'COUNT_of_BUREAU'})  # the source's bare rename() was a no-op
bureau_count.head(5)
test = test.merge(bureau_num, on='SK_ID_CURR', how='left')
test = test.merge(bureau_cat, on='SK_ID_CURR', how='left')
test = test.merge(bureau_count, on='SK_ID_CURR', how='left')
print(test.shape, "- shape of training data after merges")
del bureau, bureau_num, bureau_cat, bureau_balance  # delete the objects themselves (the source deleted only a list of their names)
gc.collect()
previous = pd.read_csv(f'{MainDir}/previous_application.csv')
print(previous.shape, "- shape of previous_application")
pos = pd.read_csv(f'{MainDir}/POS_CASH_balance.csv')
pos.columns = ['PO_'+column if column !='SK_ID_PREV' else column for column in pos.columns]
pos_num = pos.groupby(by=['SK_ID_PREV'] ).mean().reset_index()
print(pos_num.shape, "- shape of numeric features(incl index)")
pos_cat = pd.get_dummies(pos.select_dtypes('object'))
pos_cat['SK_ID_PREV'] = pos['SK_ID_PREV']
pos_cat = pos_cat.groupby(by = ['SK_ID_PREV'] ).mean().reset_index()
print(pos_cat.shape, "- shape of categorical features(incl index)")
previous = previous.merge(pos_num, on='SK_ID_PREV', how='left')
previous = previous.merge(pos_cat, on='SK_ID_PREV', how='left')
print(previous.shape, "- shape of previous data after merges")
del pos, pos_num, pos_cat
gc.collect()
inst = pd.read_csv(f'{MainDir}/installments_payments.csv')
inst.columns = ['IP_'+column if column !='SK_ID_PREV' else column for column in inst.columns]
inst_num = inst.groupby(by=['SK_ID_PREV'] ).mean().reset_index()
print(inst_num.shape, "- shape of numeric features(incl index)")
previous = previous.merge(inst_num, left_on='SK_ID_PREV', right_on = 'SK_ID_PREV', how='left')
print(previous.shape, "- shape of previous data after merges")
del inst, inst_num
gc.collect()
ccb = pd.read_csv(f'{MainDir}/credit_card_balance.csv')
ccb.columns = ['CC_'+column if column !='SK_ID_PREV' else column for column in ccb.columns]
ccb_num = ccb.groupby(by=['SK_ID_PREV'] ).mean().reset_index()
print(ccb_num.shape, "- shape of numeric features(incl index)")
ccb_cat = pd.get_dummies(ccb.select_dtypes('object'))
ccb_cat['SK_ID_PREV'] = ccb['SK_ID_PREV']
ccb_cat = ccb_cat.groupby(by = ['SK_ID_PREV'] ).mean().reset_index()
print(ccb_cat.shape, "- shape of categorical features(incl index)")
previous = previous.merge(ccb_num, on='SK_ID_PREV', how='left')
previous = previous.merge(ccb_cat, on='SK_ID_PREV', how='left')
print(previous.shape, "- shape of previous data after merges")
del ccb, ccb_num, ccb_cat
gc.collect()
previous.columns = ['PR_'+column if column !='SK_ID_CURR' else column for column in previous.columns]
previous['PR_DAYS_LAST_DUE'].replace({365243: np.nan}, inplace = True)
previous['PR_DAYS_TERMINATION'].replace({365243: np.nan}, inplace = True)
previous['PR_DAYS_FIRST_DRAWING'].replace({365243: np.nan}, inplace = True)
previous_num = previous.groupby(by=['SK_ID_CURR'] ).mean().reset_index()
print(previous_num.shape, "- shape of numeric features(incl index)")
previous_cat = pd.get_dummies(previous.select_dtypes('object'))
previous_cat['SK_ID_CURR'] = previous['SK_ID_CURR']
previous_cat = previous_cat.groupby(by = ['SK_ID_CURR'] ).mean().reset_index()  # source grouped bureau_cat here - a copy-paste slip
print(previous_cat.shape, "- shape of categorical features(incl index)")
test = test.merge(previous_num, on='SK_ID_CURR', how='left')
test = test.merge(previous_cat, on='SK_ID_CURR', how='left')
print(test.shape, "- shape of training data after merges")
del previous, previous_num, previous_cat
gc.collect()
test['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True)
test['CI_ratio'] = test['AMT_CREDIT'] / test['AMT_INCOME_TOTAL']
test['AI_ratio'] = test['AMT_ANNUITY'] / test['AMT_INCOME_TOTAL']
test['AC_ratio'] = test['AMT_CREDIT'] / test['AMT_ANNUITY']
test['CG_ratio'] = test['AMT_CREDIT'] / test['AMT_GOODS_PRICE']
test['log_INCOME'] = np.log(test['AMT_INCOME_TOTAL'])
test['log_ANNUITY'] = np.log(test['AMT_ANNUITY'])
test['log_CREDIT'] = np.log(test['AMT_CREDIT'])
test['log_GOODS'] = np.log(test['AMT_GOODS_PRICE'])
test['MissingBureau'] = test.iloc[:, 41:44].isnull().sum(axis=1 ).astype("category")
test['FLAG_CG_ratio'] = test['AMT_CREDIT'] > test['AMT_GOODS_PRICE']
test['DAYS_ID_4200'] = test['DAYS_ID_PUBLISH'] < -4200
test['AVG_EXT'] = test.iloc[:, 41:44].sum(axis=1)/(3- test.iloc[:,41:44].isnull().sum(axis=1))
test['AVG_EXT'].replace(np.nan, 0.2, inplace = True)
test.EXT_SOURCE_1.fillna(test.AVG_EXT, inplace=True)
test.EXT_SOURCE_2.fillna(test.AVG_EXT, inplace=True)
test.EXT_SOURCE_3.fillna(test.AVG_EXT, inplace=True)
test.drop(['AVG_EXT'], axis = 1)
test.drop(['ORGANIZATION_TYPE'], axis = 1)
test['OD_ratio'] = test['BU_AMT_CREDIT_SUM_OVERDUE'] / test['BU_AMT_CREDIT_SUM_DEBT']
test['OD_ratio'].replace([np.nan, np.inf, -np.inf], 0, inplace = True)
test['Credit_ratio'] = test['BU_AMT_CREDIT_SUM'] / test['BU_AMT_CREDIT_SUM_LIMIT']
test['Credit_ratio'].replace([np.nan, np.inf, -np.inf], 0, inplace = True)
test['Debt_ratio'] = test['BU_AMT_CREDIT_SUM_DEBT'] / test['BU_AMT_CREDIT_SUM']
test['Debt_ratio'].replace([np.nan, np.inf, -np.inf], 0, inplace = True)
test['PR_term'] = test['PR_IP_AMT_PAYMENT'] / test['PR_IP_AMT_INSTALMENT']
test['PR_term'].replace([np.nan, np.inf, -np.inf], 0, inplace = True)
X_test = preprocessor.transform(test)
print(X_test.shape ) | Home Credit Default Risk |
22,046,560 | best_value = np.min(cv_data['test-Logloss-mean'])
best_iter = np.argmin(cv_data['test-Logloss-mean'])
print('Best validation Logloss score, not stratified: {:.4f}±{:.4f} on step {}'.format(
best_value,
cv_data['test-Logloss-std'][best_iter],
best_iter)
)<load_from_csv> | test_pred = model.predict_proba(X_test)
print(test_pred.shape)
print(test_pred[:5] ) | Home Credit Default Risk |
22,046,560 | <prepare_output><EOS> | submission = pd.read_csv('.. /input/home-credit-default-risk/sample_submission.csv')
submission.head(10)
submission.TARGET = test_pred[:,1]
submission.head(10)
submission.to_csv('default_submission_08.csv', index=False, header = True ) | Home Credit Default Risk |
19,576,721 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<save_to_csv> | import os
import gc
import numpy as np
import pandas as pd
from scipy.stats import kurtosis
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
import xgboost as xgb
from xgboost import XGBClassifier | Home Credit Default Risk |
19,576,721 | os.chdir('/kaggle/working/')
os.curdir
sol = pd.DataFrame(predictions)
sol = sol.rename(columns={0:'Action'})
sol.index = range(1, 58922,1)
sol = sol.rename_axis('Id')
sol.to_csv('submission.csv' )<load_pretrained> | DATA_DIRECTORY = ".. /input/home-credit-loan-better-data-processing" | Home Credit Default Risk |
19,576,721 | with zipfile.ZipFile('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip','r')as zip_ref:
zip_ref.extractall("./sentiment-analysis-on-movie-reviews/")
with zipfile.ZipFile('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip','r')as zip_ref:
zip_ref.extractall("./sentiment-analysis-on-movie-reviews/" )<load_from_csv> | train = pd.read_csv(os.path.join(DATA_DIRECTORY, 'train.csv'))
test = pd.read_csv(os.path.join(DATA_DIRECTORY, 'test.csv'))
labels = pd.read_csv(os.path.join(DATA_DIRECTORY, 'labels.csv'))
| Home Credit Default Risk |
19,576,721 | data_source=pd.read_table("/kaggle/working/sentiment-analysis-on-movie-reviews/train.tsv",sep='\t')
data_source=data_source[['Phrase','Sentiment']].copy()
data_source<string_transform> | train = train.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x))
test = test.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x))
labels = labels.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x)) | Home Credit Default Risk |
19,576,721 | dff=[len(i.split(" ")) for i in data_source.Phrase[:10]]
max(dff )<import_modules> | train=np.nan_to_num(train)
test=np.nan_to_num(test)
labels=np.nan_to_num(labels ) | Home Credit Default Risk |
19,576,721 | from transformers import TFBertModel, BertConfig, BertTokenizerFast, TFAutoModel
from tensorflow.keras.layers import Input, Dropout, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.initializers import TruncatedNormal
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import CategoricalAccuracy
from tensorflow.keras.utils import to_categorical
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split<split> | train = pd.DataFrame(train)
test = pd.DataFrame(test)
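# np.nan_to_num above returned plain ndarrays, so these rebuilt DataFrames carry integer
# column labels; df.fillna(0) would have preserved the original column names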
labels=pd.DataFrame(labels ) | Home Credit Default Risk |
19,576,721 | X_train_data, X_validation_data, y_train_data, y_validation_data = train_test_split(data_source.index.values,
data_source.Sentiment.values,
test_size=0.10,
random_state=42,
stratify=data_source.Sentiment)
<feature_engineering> | X_train, X_test, y_train, y_test = train_test_split(train, labels, random_state=42 ) | Home Credit Default Risk |
19,576,721 | data_source['data_type'] = ['not_set']*data_source.shape[0]
data_source.loc[X_train_data, 'data_type'] = 'training'
data_source.loc[X_validation_data, 'data_type'] = 'validation'<count_missing_values> | clf = DummyClassifier(strategy= 'most_frequent' ).fit(X_train,y_train)
y_pred = clf.predict(X_test)
print('y actual :\n' + str(y_test.value_counts()))
print('y predicted :\n' + str(pd.Series(y_pred ).value_counts())) | Home Credit Default Risk |
19,576,721 | data_source.isnull().sum()<filter> | print('Accuracy Score : ' + str(accuracy_score(y_test,y_pred)))
print('Precision Score : ' + str(precision_score(y_test,y_pred)))
print('Recall Score : ' + str(recall_score(y_test,y_pred)))
print('F1 Score : ' + str(f1_score(y_test,y_pred)))
print('Confusion Matrix :\n' + str(confusion_matrix(y_test,y_pred)) ) | Home Credit Default Risk |
19,576,721 | data_source[data_source.data_type=='training'].Phrase<load_pretrained> | clf = LGBMClassifier().fit(X_train,y_train)
y_pred = clf.predict(X_test)
print('Accuracy Score : ' + str(accuracy_score(y_test,y_pred)))
print('Precision Score : ' + str(precision_score(y_test,y_pred)))
print('Recall Score : ' + str(recall_score(y_test,y_pred)))
print('F1 Score : ' + str(f1_score(y_test,y_pred)))
print('Confusion Matrix :\n' + str(confusion_matrix(y_test,y_pred)) ) | Home Credit Default Risk |
19,576,721 | max_token_length = max(dff)+3
number_of_samples = len(data_source)
bert = 'bert-base-cased'
config = BertConfig.from_pretrained(bert)
config.output_hidden_states = False
tokenizer = BertTokenizerFast.from_pretrained(pretrained_model_name_or_path = bert, config = config)
<categorify> | clf = LogisticRegression()
grid_values = {'penalty': ['l2'],'C':[0.001,.009,0.01,.09,1,5,10,25]}
grid_clf_acc = GridSearchCV(clf, param_grid = grid_values,scoring = 'recall')
grid_clf_acc.fit(X_train, y_train)
y_pred_acc = grid_clf_acc.predict(X_test)
print('Accuracy Score : ' + str(accuracy_score(y_test,y_pred_acc)))
print('Precision Score : ' + str(precision_score(y_test,y_pred_acc)))
print('Recall Score : ' + str(recall_score(y_test,y_pred_acc)))
print('F1 Score : ' + str(f1_score(y_test,y_pred_acc)))
confusion_matrix(y_test,y_pred_acc ) | Home Credit Default Risk |
19,576,721 | def map_function(input_ids, masks,labels):
return {'input_ids': input_ids, 'attention_mask': masks},labels<categorify> | pred = model.predict_proba(df_test ) | Home Credit Default Risk |
19,576,721 | <categorify><EOS> | submit = test[['SK_ID_CURR']]
submit['TARGET'] = pred
submit.to_csv('lgbm_Minimized_code.csv', index = False ) | Home Credit Default Risk |
18,348,927 | <SOS> metric: AUC Kaggle data source: home-credit-default-risk<define_variables> | warnings.simplefilter(action='ignore', category=FutureWarning ) | Home Credit Default Risk |
18,348,927 | batch_size = 32
train_dataset = train_dataset.shuffle(1000 ).batch(batch_size, drop_remainder=True )<categorify> | DATA_DIRECTORY = "../input/home-credit-default-risk" | Home Credit Default Risk |
18,348,927 | y_senti = to_categorical(data_source[data_source.data_type=='validation'].Sentiment)
v= tokenizer(
text=data_source[data_source.data_type=='validation'].Phrase.to_list() ,
add_special_tokens=True,
max_length=max_token_length,
truncation=True,
padding=True,
return_tensors='tf',
return_token_type_ids = False,
return_attention_mask = True,
verbose = True)
validation_dataset = tf.data.Dataset.from_tensor_slices(( v['input_ids'], v['attention_mask'], y_senti))
validation_dataset = validation_dataset.map(map_function)
validation_dataset = validation_dataset.shuffle(1000 ).batch(batch_size, drop_remainder=True )<choose_model_class> | df_train = pd.read_csv(os.path.join(DATA_DIRECTORY, 'application_train.csv'))
df_test = pd.read_csv(os.path.join(DATA_DIRECTORY, 'application_test.csv'))
df = df_train.append(df_test)
del df_train, df_test; gc.collect() | Home Credit Default Risk |
18,348,927 | input_ids = tf.keras.Input(shape=(max_token_length,), name='input_ids', dtype='int32')
attention_mask = tf.keras.Input(shape=(max_token_length,), name='attention_mask', dtype='int32')
inputs = {'input_ids': input_ids, 'attention_mask': attention_mask}
bert = TFAutoModel.from_pretrained('bert-base-cased')
embeddings = bert.bert(inputs)[1]
xis = tf.keras.layers.Dense(1024,activation='relu' )(embeddings)
yhat = tf.keras.layers.Dense(sent_values_array.max() +1, activation='softmax', name='outputs' )(xis)
<choose_model_class> | df = df[df['AMT_INCOME_TOTAL'] < 20000000]
df = df[df['CODE_GENDER'] != 'XNA']
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
df['DAYS_LAST_PHONE_CHANGE'].replace(0, np.nan, inplace=True ) | Home Credit Default Risk |
18,348,927 | optimizer = tf.keras.optimizers.Adam(lr=1e-5, decay=1e-6)
loss = tf.keras.losses.CategoricalCrossentropy()
accuracy = tf.keras.metrics.CategoricalAccuracy('accuracy' )<choose_model_class> | def get_age_group(days_birth):
age_years = -days_birth / 365
if age_years < 27: return 1
elif age_years < 40: return 2
elif age_years < 50: return 3
elif age_years < 65: return 4
elif age_years < 99: return 5
else: return 0 | Home Credit Default Risk |
18,348,927 | model.compile(optimizer=optimizer, loss=loss, metrics=[accuracy] )<train_model> | docs = [f for f in df.columns if 'FLAG_DOC' in f]
df['DOCUMENT_COUNT'] = df[docs].sum(axis=1)
df['NEW_DOC_KURT'] = df[docs].kurtosis(axis=1)
df['AGE_RANGE'] = df['DAYS_BIRTH'].apply(lambda x: get_age_group(x)) | Home Credit Default Risk |
18,348,927 | history1 = model.fit(
train_dataset,
validation_data= validation_dataset,
epochs=8 )<save_model> | df['EXT_SOURCES_PROD'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']
df['EXT_SOURCES_WEIGHTED'] = df.EXT_SOURCE_1 * 2 + df.EXT_SOURCE_2 * 1 + df.EXT_SOURCE_3 * 3
np.warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
for function_name in ['min', 'max', 'mean', 'nanmedian', 'var']:
feature_name = 'EXT_SOURCES_{}'.format(function_name.upper())
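# getattr(np, function_name) would be a safer equivalent to the eval() below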
df[feature_name] = eval('np.{}'.format(function_name))(
df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']], axis=1 ) | Home Credit Default Risk |