kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
8,201,023 | %%writefile -a submission.py
game = HungryGeese()
state_dict = pickle.loads(bz2.decompress(base64.b64decode(weight)))
agent = NNAgent(state_dict)
mcts = MCTS(game, agent)
def alphagoose_agent(obs, config):
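# per-move search budget (assumed intent): after step 50, with more than 5 s of overage left, spend actTimeout plus an even share of the remaining overage across the remaining steps; otherwise a flat 1 s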
timelimit = config.actTimeout + obs.remainingOverageTime / (config.episodeSteps - obs.step) if obs.step > 50 and obs.remainingOverageTime > 5 else 1
action = game.actions[np.argmax(
mcts.getActionProb(obs, timelimit)) ]
return action.name<import_modules> | speed_test = True | Deepfake Detection Challenge |
8,201,023 |
<set_options> | if speed_test:
start_time = time.time()
speedtest_videos = test_videos[:5]
predictions = predict_on_video_set(speedtest_videos, num_workers=4)
elapsed = time.time() - start_time
print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) ) | Deepfake Detection Challenge |
8,201,023 | seed = 50
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False<set_options> | predictions = predict_on_video_set(test_videos, num_workers=4 ) | Deepfake Detection Challenge |
8,201,023 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )<load_from_csv> | submission_df_resnext = pd.DataFrame({"filename": test_videos, "label": predictions})
submission_df_resnext.to_csv("submission_resnext.csv", index=False ) | Deepfake Detection Challenge |
8,201,023 | data_path = '/kaggle/input/aerial-cactus-identification/'
labels = pd.read_csv(data_path + 'train.csv')
submission = pd.read_csv(data_path + 'sample_submission.csv')<load_pretrained> | !pip install ../input/deepfake-xception-trained-model/pytorchcv-0.0.55-py2.py3-none-any.whl --quiet | Deepfake Detection Challenge |
8,201,023 | with ZipFile(data_path + 'train.zip') as zipper:
zipper.extractall()
with ZipFile(data_path + 'test.zip') as zipper:
zipper.extractall()<split> | test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/"
test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"])
len(test_videos ) | Deepfake Detection Challenge |
8,201,023 | _, valid = train_test_split(labels,
test_size=0.1,
stratify=labels['has_cactus'],
random_state=50 )<normalization> | gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) | Deepfake Detection Challenge |
8,201,023 | class ImageDataset(Dataset):
def __init__(self, df, img_dir='./', transform=None):
super().__init__()
self.df = df
self.img_dir = img_dir
self.transform = transform
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
img_id = self.df.iloc[idx, 0]
img_path = self.img_dir + img_id
image = cv2.imread(img_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
label = self.df.iloc[idx, 1]
if self.transform is not None:
image = self.transform(image)
return image, label<normalization> | facedet = BlazeFace().to(gpu)
facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth")
facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy")
_ = facedet.train(False ) | Deepfake Detection Challenge |
8,201,023 | transform_train = transforms.Compose([transforms.ToPILImage() ,
transforms.Pad(32, padding_mode='symmetric'),
transforms.RandomHorizontalFlip() ,
transforms.RandomVerticalFlip() ,
transforms.RandomRotation(10),
transforms.ToTensor() ,
transforms.Normalize(( 0.485, 0.456, 0.406),
(0.229, 0.224, 0.225)) ])
transform_test= transforms.Compose([transforms.ToPILImage() ,
transforms.Pad(32, padding_mode='symmetric'),
transforms.ToTensor() ,
transforms.Normalize(( 0.485, 0.456, 0.406),
(0.229, 0.224, 0.225)) ] )<create_dataframe> | frames_per_video = 83
video_reader = VideoReader()
video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video)
face_extractor = FaceExtractor(video_read_fn, facedet ) | Deepfake Detection Challenge |
8,201,023 | dataset_train = ImageDataset(df=labels, img_dir='train/', transform=transform_train)
dataset_valid = ImageDataset(df=valid, img_dir='train/', transform=transform_test )<load_pretrained> | input_size = 150 | Deepfake Detection Challenge |
8,201,023 | loader_train = DataLoader(dataset=dataset_train, batch_size=32, shuffle=True)
loader_valid = DataLoader(dataset=dataset_valid, batch_size=32, shuffle=False )<import_modules> | mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
normalize_transform = Normalize(mean, std ) | Deepfake Detection Challenge |
8,201,023 | class Model(nn.Module):
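# plain CNN: five Conv-BatchNorm-LeakyReLU-MaxPool blocks, average pooling, then two fully connected layers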
def __init__(self):
super().__init__()
self.layer1 = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=32,
kernel_size=3, padding=2),
nn.BatchNorm2d(32),
nn.LeakyReLU() ,
nn.MaxPool2d(kernel_size=2))
self.layer2 = nn.Sequential(nn.Conv2d(in_channels=32, out_channels=64,
kernel_size=3, padding=2),
nn.BatchNorm2d(64),
nn.LeakyReLU() ,
nn.MaxPool2d(kernel_size=2))
self.layer3 = nn.Sequential(nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, padding=2),
nn.BatchNorm2d(128),
nn.LeakyReLU() ,
nn.MaxPool2d(kernel_size=2))
self.layer4 = nn.Sequential(nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, padding=2),
nn.BatchNorm2d(256),
nn.LeakyReLU() ,
nn.MaxPool2d(kernel_size=2))
self.layer5 = nn.Sequential(nn.Conv2d(in_channels=256, out_channels=512,
kernel_size=3, padding=2),
nn.BatchNorm2d(512),
nn.LeakyReLU() ,
nn.MaxPool2d(kernel_size=2))
self.avg_pool = nn.AvgPool2d(kernel_size=4)
self.fc1 = nn.Linear(in_features=512 * 1 * 1, out_features=64)
self.fc2 = nn.Linear(in_features=64, out_features=2)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = self.avg_pool(x)
x = x.view(-1, 512 * 1 * 1)
x = self.fc1(x)
x = self.fc2(x)
return x<train_model> | model = get_model("xception", pretrained=False)
model = nn.Sequential(*list(model.children())[:-1])
class Pooling(nn.Module):
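# blends adaptive average- and max-pooling by taking their mean (defined here, though the final block below is replaced with plain adaptive average pooling)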
def __init__(self):
super(Pooling, self ).__init__()
self.p1 = nn.AdaptiveAvgPool2d(( 1,1))
self.p2 = nn.AdaptiveMaxPool2d(( 1,1))
def forward(self, x):
x1 = self.p1(x)
x2 = self.p2(x)
return(x1+x2)* 0.5
model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(( 1,1)))
class Head(torch.nn.Module):
def __init__(self, in_f, out_f):
super(Head, self ).__init__()
self.f = nn.Flatten()
self.l = nn.Linear(in_f, 512)
self.d = nn.Dropout(0.5)
self.o = nn.Linear(512, out_f)
self.b1 = nn.BatchNorm1d(in_f)
self.b2 = nn.BatchNorm1d(512)
self.r = nn.ReLU()
def forward(self, x):
x = self.f(x)
x = self.b1(x)
x = self.d(x)
x = self.l(x)
x = self.r(x)
x = self.b2(x)
x = self.d(x)
out = self.o(x)
return out
class FCN(torch.nn.Module):
def __init__(self, base, in_f):
super(FCN, self ).__init__()
self.base = base
self.h1 = Head(in_f, 1)
def forward(self, x):
x = self.base(x)
return self.h1(x)
net = []
model = FCN(model, 2048)
model = model.cuda()
model.load_state_dict(torch.load('../input/deepfake-xception-trained-model/model.pth'))
net.append(model ) | Deepfake Detection Challenge |
8,201,023 | model = Model().to(device )<choose_model_class> | def predict_on_video(video_path, batch_size):
try:
faces = face_extractor.process_video(video_path)
face_extractor.keep_only_best_face(faces)
if len(faces)> 0:
x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8)
n = 0
for frame_data in faces:
for face in frame_data["faces"]:
resized_face = isotropically_resize_image(face, input_size)
resized_face = make_square_image(resized_face)
if n < batch_size:
x[n] = resized_face
n += 1
else:
print("WARNING: have %d faces but batch size is %d" %(n, batch_size))
if n > 0:
x = torch.tensor(x, device=gpu ).float()
x = x.permute(( 0, 3, 1, 2))
for i in range(len(x)) :
x[i] = normalize_transform(x[i] / 255.)
with torch.no_grad() :
y_pred = model(x)
y_pred = torch.sigmoid(y_pred.squeeze())
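# aggregate per-face scores with the 51st percentile ("nearest"), a mean-like statistic that is more robust to outlier faces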
return np.percentile(y_pred[:n].to('cpu'), 51, interpolation="nearest")
except Exception as e:
print("Prediction error on video %s: %s" %(video_path, str(e)))
return 0.481 | Deepfake Detection Challenge |
8,201,023 | criterion = nn.CrossEntropyLoss()<choose_model_class> | def predict_on_video_set(videos, num_workers):
def process_file(i):
filename = videos[i]
y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video)
return y_pred
with ThreadPoolExecutor(max_workers=num_workers)as ex:
predictions = ex.map(process_file, range(len(videos)))
return list(predictions ) | Deepfake Detection Challenge |
8,201,023 | optimizer = torch.optim.Adamax(model.parameters() , lr=0.00006 )<categorify> | speed_test = True | Deepfake Detection Challenge |
8,201,023 | epochs = 70
for epoch in range(epochs):
epoch_loss = 0
for images, labels in loader_train:
images = images.to(device)
labels = labels.to(device)
optimizer.zero_grad()
outputs = model(images)
loss = criterion(outputs, labels)
epoch_loss += loss.item()
loss.backward()
optimizer.step()
print(f'Epoch: [{epoch+1}/{epochs}], loss: {epoch_loss/len(loader_train):.4f}')<load_pretrained> | if speed_test:
start_time = time.time()
speedtest_videos = test_videos[:5]
predictions = predict_on_video_set(speedtest_videos, num_workers=4)
elapsed = time.time() - start_time
print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) ) | Deepfake Detection Challenge |
8,201,023 | dataset_test = ImageDataset(df=submission, img_dir='test/', transform=transform_test)
loader_test = DataLoader(dataset=dataset_test, batch_size=32, shuffle=False)
model.eval()
preds = []
with torch.no_grad() :
for images, _ in loader_test:
images = images.to(device)
outputs = model(images)
preds_part = torch.softmax(outputs.cpu() , dim=1)[:, 1].tolist()
preds.extend(preds_part )<save_to_csv> | %%time
model.eval()
predictions = predict_on_video_set(test_videos, num_workers=4 ) | Deepfake Detection Challenge |
8,201,023 | submission['has_cactus'] = preds
submission.to_csv('submission.csv', index=False )<set_options> | submission_df_xception = pd.DataFrame({"filename": test_videos, "label": predictions})
submission_df_xception.to_csv("submission_xception.csv", index=False ) | Deepfake Detection Challenge |
8,201,023 | shutil.rmtree('./train')
shutil.rmtree('./test' )<set_options> | submission_df = pd.DataFrame({"filename": test_videos} ) | Deepfake Detection Challenge |
8,201,023 | warnings.filterwarnings('ignore' )<init_hyperparams> | r1 = 0.32
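# blend weights for the two detectors (ResNeXt and Xception); normalized below so they sum to 1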
r2 = 0.68
total = r1 + r2
r11 = r1/total
r22 = r2/total | Deepfake Detection Challenge |
8,201,023 | def seed_everything(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
def reduce_mem_usage(df, verbose=True):
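# downcast every numeric column to the smallest int/float dtype that can hold its min/max, cutting DataFrame memory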
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8 ).min and c_max < np.iinfo(np.int8 ).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16 ).min and c_max < np.iinfo(np.int16 ).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32 ).min and c_max < np.iinfo(np.int32 ).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64 ).min and c_max < np.iinfo(np.int64 ).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16 ).min and c_max < np.finfo(np.float16 ).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32 ).min and c_max < np.finfo(np.float32 ).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose: print('Mem. usage decreased to {:5.2f} MB ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df<predict_on_test> | submission_df["label"] = r22*submission_df_resnext["label"] + r11*submission_df_xception["label"] | Deepfake Detection Challenge |
8,201,023 | <define_variables><EOS> | submission_df.to_csv("submission.csv", index=False ) | Deepfake Detection Challenge |
7,717,913 | <SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<init_hyperparams> | %matplotlib inline
| Deepfake Detection Challenge |
7,717,913 | lgb_params = {
'objective':'binary',
'boosting_type':'gbdt',
'metric':'auc',
'n_jobs':-1,
'learning_rate':0.01,
'num_leaves': 2**8,
'max_depth':-1,
'tree_learner':'serial',
'colsample_bytree': 0.7,
'subsample_freq':1,
'subsample':0.7,
'n_estimators':800,
'max_bin':255,
'verbose':-1,
'seed': SEED,
'early_stopping_rounds':100,
}<load_pretrained> | test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/"
test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"])
len(test_videos ) | Deepfake Detection Challenge |
7,717,913 | print('Load Data')
train_df = pd.read_pickle('../input/ieee-fe-with-some-eda/train_df.pkl')
if LOCAL_TEST:
test_df = train_df[train_df['DT_M']==train_df['DT_M'].max() ].reset_index(drop=True)
train_df = train_df[train_df['DT_M']<(train_df['DT_M'].max() -1)].reset_index(drop=True)
else:
test_df = pd.read_pickle('../input/ieee-fe-with-some-eda/test_df.pkl')
remove_features = pd.read_pickle('../input/ieee-fe-with-some-eda/remove_features.pkl')
remove_features = list(remove_features['features_to_remove'].values)
print('Shape control:', train_df.shape, test_df.shape )<categorify> | print("PyTorch version:", torch.__version__)
print("CUDA version:", torch.version.cuda)
print("cuDNN version:", torch.backends.cudnn.version() ) | Deepfake Detection Challenge |
7,717,913 | features_columns = [col for col in list(train_df)if col not in remove_features]
train_df = reduce_mem_usage(train_df)
test_df = reduce_mem_usage(test_df )<predict_on_test> | gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
gpu | Deepfake Detection Challenge |
7,717,913 | if LOCAL_TEST:
lgb_params['learning_rate'] = 0.01
lgb_params['n_estimators'] = 20000
lgb_params['early_stopping_rounds'] = 100
test_predictions = make_predictions(train_df, test_df, features_columns, TARGET, lgb_params)
print(metrics.roc_auc_score(test_predictions[TARGET], test_predictions['prediction']))
else:
lgb_params['learning_rate'] = 0.005
lgb_params['n_estimators'] = 1800
lgb_params['early_stopping_rounds'] = 100
test_predictions = make_predictions(train_df, test_df, features_columns, TARGET, lgb_params, NFOLDS=8 )<save_to_csv> | facedet = BlazeFace().to(gpu)
facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth")
facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy")
_ = facedet.train(False ) | Deepfake Detection Challenge |
7,717,913 | if not LOCAL_TEST:
test_predictions['isFraud'] = test_predictions['prediction']
test_predictions[['TransactionID','isFraud']].to_csv('submission.csv', index=False )<define_variables> | frames_per_video = 17
video_reader = VideoReader()
video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video)
face_extractor = FaceExtractor(video_read_fn, facedet ) | Deepfake Detection Challenge |
7,717,913 | BUILD95 = True
BUILD96 = True
str_type = ['ProductCD', 'card4', 'card6', 'P_emaildomain', 'R_emaildomain','M1', 'M2', 'M3', 'M4','M5',
'M6', 'M7', 'M8', 'M9', 'id_12', 'id_15', 'id_16', 'id_23', 'id_27', 'id_28', 'id_29', 'id_30',
'id_31', 'id_33', 'id_34', 'id_35', 'id_36', 'id_37', 'id_38', 'DeviceType', 'DeviceInfo']
str_type += ['id-12', 'id-15', 'id-16', 'id-23', 'id-27', 'id-28', 'id-29', 'id-30',
'id-31', 'id-33', 'id-34', 'id-35', 'id-36', 'id-37', 'id-38']
cols = ['TransactionID', 'TransactionDT', 'TransactionAmt',
'ProductCD', 'card1', 'card2', 'card3', 'card4', 'card5', 'card6',
'addr1', 'addr2', 'dist1', 'dist2', 'P_emaildomain', 'R_emaildomain',
'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11',
'C12', 'C13', 'C14', 'D1', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'D8',
'D9', 'D10', 'D11', 'D12', 'D13', 'D14', 'D15', 'M1', 'M2', 'M3', 'M4',
'M5', 'M6', 'M7', 'M8', 'M9']
v = [1, 3, 4, 6, 8, 11]
v += [13, 14, 17, 20, 23, 26, 27, 30]
v += [36, 37, 40, 41, 44, 47, 48]
v += [54, 56, 59, 62, 65, 67, 68, 70]
v += [76, 78, 80, 82, 86, 88, 89, 91]
v += [107, 108, 111, 115, 117, 120, 121, 123]
v += [124, 127, 129, 130, 136]
v += [138, 139, 142, 147, 156, 162]
v += [165, 160, 166]
v += [178, 176, 173, 182]
v += [187, 203, 205, 207, 215]
v += [169, 171, 175, 180, 185, 188, 198, 210, 209]
v += [218, 223, 224, 226, 228, 229, 235]
v += [240, 258, 257, 253, 252, 260, 261]
v += [264, 266, 267, 274, 277]
v += [220, 221, 234, 238, 250, 271]
v += [294, 284, 285, 286, 291, 297]
v += [303, 305, 307, 309, 310, 320]
v += [281, 283, 289, 296, 301, 314]
cols += ['V'+str(x)for x in v]
dtypes = {}
for c in cols+['id_0'+str(x)for x in range(1,10)]+['id_'+str(x)for x in range(10,34)]+\
['id-0'+str(x)for x in range(1,10)]+['id-'+str(x)for x in range(10,34)]:
dtypes[c] = 'float32'
for c in str_type: dtypes[c] = 'category'<load_from_csv> | input_size = 224 | Deepfake Detection Challenge |
7,717,913 | %%time
X_train = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv', index_col='TransactionID', dtype=dtypes, usecols=cols+['isFraud'])
train_id = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv', index_col='TransactionID', dtype=dtypes)
X_train = X_train.merge(train_id, how='left', left_index=True, right_index=True)
X_test = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv', index_col='TransactionID', dtype=dtypes, usecols=cols)
test_id = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv', index_col='TransactionID', dtype=dtypes)
fix = {o:n for o, n in zip(test_id.columns, train_id.columns)}
test_id.rename(columns=fix, inplace=True)
X_test = X_test.merge(test_id, how='left', left_index=True, right_index=True)
y_train = X_train['isFraud'].copy()
del train_id, test_id, X_train['isFraud']; x = gc.collect()
print('Train shape',X_train.shape,'test shape',X_test.shape )<feature_engineering> | mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
normalize_transform = Normalize(mean, std ) | Deepfake Detection Challenge |
7,717,913 | for i in range(1,16):
if i in [1,2,3,5,9]: continue
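# D columns count days relative to TransactionDT; subtracting the transaction day turns them into fixed points in time that do not drift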
X_train['D'+str(i)] = X_train['D'+str(i)] - X_train.TransactionDT/np.float32(24*60*60)
X_test['D'+str(i)] = X_test['D'+str(i)] - X_test.TransactionDT/np.float32(24*60*60 )<data_type_conversions> | class MyResNeXt(models.resnet.ResNet):
def __init__(self, training=True):
super(MyResNeXt, self ).__init__(block=models.resnet.Bottleneck,
layers=[3, 4, 6, 3],
groups=32,
width_per_group=4)
self.fc = nn.Linear(2048, 1 ) | Deepfake Detection Challenge |
7,717,913 | %%time
for i,f in enumerate(X_train.columns):
if (str(X_train[f].dtype) == 'category') | (X_train[f].dtype == 'object'):
df_comb = pd.concat([X_train[f],X_test[f]],axis=0)
df_comb,_ = df_comb.factorize(sort=True)
if df_comb.max() >32000: print(f,'needs int32')
X_train[f] = df_comb[:len(X_train)].astype('int16')
X_test[f] = df_comb[len(X_train):].astype('int16')
elif f not in ['TransactionAmt','TransactionDT']:
mn = np.min(( X_train[f].min() ,X_test[f].min()))
X_train[f] -= np.float32(mn)
X_test[f] -= np.float32(mn)
X_train[f].fillna(-1,inplace=True)
X_test[f].fillna(-1,inplace=True )<categorify> | checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu)
model = MyResNeXt().to(gpu)
model.load_state_dict(checkpoint)
_ = model.eval()
del checkpoint | Deepfake Detection Challenge |
7,717,913 | def encode_FE(df1, df2, cols):
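# frequency encoding: map each category to its relative frequency computed over train and test combined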
for col in cols:
df = pd.concat([df1[col],df2[col]])
vc = df.value_counts(dropna=True, normalize=True ).to_dict()
vc[-1] = -1
nm = col+'_FE'
df1[nm] = df1[col].map(vc)
df1[nm] = df1[nm].astype('float32')
df2[nm] = df2[col].map(vc)
df2[nm] = df2[nm].astype('float32')
print(nm,', ',end='')
def encode_LE(col,train=X_train,test=X_test,verbose=True):
df_comb = pd.concat([train[col],test[col]],axis=0)
df_comb,_ = df_comb.factorize(sort=True)
nm = col
if df_comb.max() >32000:
train[nm] = df_comb[:len(train)].astype('int32')
test[nm] = df_comb[len(train):].astype('int32')
else:
train[nm] = df_comb[:len(train)].astype('int16')
test[nm] = df_comb[len(train):].astype('int16')
del df_comb; x=gc.collect()
if verbose: print(nm,', ',end='')
def encode_AG(main_columns, uids, aggregations=['mean'], train_df=X_train, test_df=X_test,
fillna=True, usena=False):
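# grouped aggregation: for every (feature, uid, agg) triple, compute the group statistic over train+test and map it back as a new column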
for main_column in main_columns:
for col in uids:
for agg_type in aggregations:
new_col_name = main_column+'_'+col+'_'+agg_type
temp_df = pd.concat([train_df[[col, main_column]], test_df[[col,main_column]]])
if usena: temp_df.loc[temp_df[main_column]==-1,main_column] = np.nan
temp_df = temp_df.groupby([col])[main_column].agg([agg_type] ).reset_index().rename(
columns={agg_type: new_col_name})
temp_df.index = list(temp_df[col])
temp_df = temp_df[new_col_name].to_dict()
train_df[new_col_name] = train_df[col].map(temp_df ).astype('float32')
test_df[new_col_name] = test_df[col].map(temp_df ).astype('float32')
if fillna:
train_df[new_col_name].fillna(-1,inplace=True)
test_df[new_col_name].fillna(-1,inplace=True)
print("'"+new_col_name+"'",', ',end='')
def encode_CB(col1,col2,df1=X_train,df2=X_test):
nm = col1+'_'+col2
df1[nm] = df1[col1].astype(str)+'_'+df1[col2].astype(str)
df2[nm] = df2[col1].astype(str)+'_'+df2[col2].astype(str)
encode_LE(nm,verbose=False)
print(nm,', ',end='')
def encode_AG2(main_columns, uids, train_df=X_train, test_df=X_test):
for main_column in main_columns:
for col in uids:
comb = pd.concat([train_df[[col]+[main_column]],test_df[[col]+[main_column]]],axis=0)
mp = comb.groupby(col)[main_column].agg(['nunique'])['nunique'].to_dict()
train_df[col+'_'+main_column+'_ct'] = train_df[col].map(mp ).astype('float32')
test_df[col+'_'+main_column+'_ct'] = test_df[col].map(mp ).astype('float32')
print(col+'_'+main_column+'_ct, ',end='' )<categorify> | def predict_on_video(video_path, batch_size):
try:
faces = face_extractor.process_video(video_path)
face_extractor.keep_only_best_face(faces)
if len(faces)> 0:
x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8)
n = 0
for frame_data in faces:
for face in frame_data["faces"]:
resized_face = isotropically_resize_image(face, input_size)
resized_face = make_square_image(resized_face)
if n < batch_size:
x[n] = resized_face
n += 1
else:
print("WARNING: have %d faces but batch size is %d" %(n, batch_size))
if n > 0:
x = torch.tensor(x, device=gpu ).float()
x = x.permute(( 0, 3, 1, 2))
for i in range(len(x)) :
x[i] = normalize_transform(x[i] / 255.)
with torch.no_grad() :
y_pred = model(x)
y_pred = torch.sigmoid(y_pred.squeeze())
return y_pred[:n].mean().item()
except Exception as e:
print("Prediction error on video %s: %s" %(video_path, str(e)))
return 0.5 | Deepfake Detection Challenge |
7,717,913 | %%time
X_train['cents'] =(X_train['TransactionAmt'] - np.floor(X_train['TransactionAmt'])).astype('float32')
X_test['cents'] =(X_test['TransactionAmt'] - np.floor(X_test['TransactionAmt'])).astype('float32')
print('cents, ', end='')
encode_FE(X_train,X_test,['addr1','card1','card2','card3','P_emaildomain'])
encode_CB('card1','addr1')
encode_CB('card1_addr1','P_emaildomain')
encode_FE(X_train,X_test,['card1_addr1','card1_addr1_P_emaildomain'])
encode_AG(['TransactionAmt','D9','D11'],['card1','card1_addr1','card1_addr1_P_emaildomain'],['mean','std'],usena=True )<drop_column> | def predict_on_video_set(videos, num_workers):
def process_file(i):
filename = videos[i]
y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video)
return y_pred
with ThreadPoolExecutor(max_workers=num_workers)as ex:
predictions = ex.map(process_file, range(len(videos)))
return list(predictions ) | Deepfake Detection Challenge |
7,717,913 | cols = list(X_train.columns)
cols.remove('TransactionDT')
for c in ['D6','D7','D8','D9','D12','D13','D14']:
cols.remove(c)
for c in ['C3','M5','id_08','id_33']:
cols.remove(c)
for c in ['card4','id_07','id_14','id_21','id_30','id_32','id_34']:
cols.remove(c)
for c in ['id_'+str(x)for x in range(22,28)]:
cols.remove(c )<prepare_x_and_y> | speed_test = False | Deepfake Detection Challenge |
7,717,913 | idxT = X_train.index[:3*len(X_train)//4]
idxV = X_train.index[3*len(X_train)//4:]
<train_model> | if speed_test:
start_time = time.time()
speedtest_videos = test_videos[:5]
predictions = predict_on_video_set(speedtest_videos, num_workers=4)
elapsed = time.time() - start_time
print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) ) | Deepfake Detection Challenge |
7,717,913 | print("XGBoost version:", xgb.__version__)
if BUILD95:
clf = xgb.XGBClassifier(
n_estimators=2000,
max_depth=12,
learning_rate=0.02,
subsample=0.8,
colsample_bytree=0.4,
missing=-1,
eval_metric='auc',
tree_method='gpu_hist'
)
h = clf.fit(X_train.loc[idxT,cols], y_train[idxT],
eval_set=[(X_train.loc[idxV,cols],y_train[idxV])],
verbose=50, early_stopping_rounds=100 )<feature_engineering> | predictions = predict_on_video_set(test_videos, num_workers=4 ) | Deepfake Detection Challenge |
7,717,913 | <train_on_grid><EOS> | submission_df = pd.DataFrame({"filename": test_videos, "label": predictions})
submission_df.to_csv("submission.csv", index=False ) | Deepfake Detection Challenge |
7,463,602 | <SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<save_to_csv> | !pip install ../input/facenet-pytorch-vggface2/facenet_pytorch-2.0.1-py3-none-any.whl
!mkdir -p /root/.cache/torch/checkpoints/
!cp ../input/facenet-pytorch-vggface2/20180402-114759-vggface2-logits.pth /root/.cache/torch/checkpoints/vggface2_DG3kwML46X.pt
!cp ../input/facenet-pytorch-vggface2/20180402-114759-vggface2-features.pth /root/.cache/torch/checkpoints/vggface2_G5aNV2VSMn.pt | Deepfake Detection Challenge |
7,463,602 | if BUILD95:
plt.hist(oof,bins=100)
plt.ylim(( 0,5000))
plt.title('XGB OOF')
plt.show()
X_train['oof'] = oof
X_train.reset_index(inplace=True)
X_train[['TransactionID','oof']].to_csv('oof_xgb_95.csv')
X_train.set_index('TransactionID',drop=True,inplace=True)
else: X_train['oof'] = 0<save_to_csv> | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
| Deepfake Detection Challenge |
7,463,602 | if BUILD95:
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
sample_submission.isFraud = preds
sample_submission.to_csv('sub_xgb_95.csv',index=False)
plt.hist(sample_submission.isFraud,bins=100)
plt.ylim(( 0,5000))
plt.title('XGB95 Submission')
plt.show()<data_type_conversions> | submission_path = '../input/deepfake-detection-challenge/sample_submission.csv'
train_video_path = '../input/deepfake-detection-challenge/train_sample_videos'
test_video_path = '../input/deepfake-detection-challenge/test_videos' | Deepfake Detection Challenge |
7,463,602 | X_train['day'] = X_train.TransactionDT /(24*60*60)
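# day - D1 approximates the day the client's card was opened, so card1_addr1 combined with it serves as a proxy client UID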
X_train['uid'] = X_train.card1_addr1.astype(str)+'_'+np.floor(X_train.day-X_train.D1 ).astype(str)
X_test['day'] = X_test.TransactionDT /(24*60*60)
X_test['uid'] = X_test.card1_addr1.astype(str)+'_'+np.floor(X_test.day-X_test.D1 ).astype(str )<categorify> | list_train = glob(os.path.join(train_video_path, '*.mp4'))
print(f'Sum video in train: {len(list_train)}' ) | Deepfake Detection Challenge |
7,463,602 | %%time
encode_FE(X_train,X_test,['uid'])
encode_AG(['TransactionAmt','D4','D9','D10','D15'],['uid'],['mean','std'],fillna=True,usena=True)
encode_AG(['C'+str(x)for x in range(1,15)if x!=3],['uid'],['mean'],X_train,X_test,fillna=True,usena=True)
encode_AG(['M'+str(x)for x in range(1,10)],['uid'],['mean'],fillna=True,usena=True)
encode_AG2(['P_emaildomain','dist1','DT_M','id_02','cents'], ['uid'], train_df=X_train, test_df=X_test)
encode_AG(['C14'],['uid'],['std'],X_train,X_test,fillna=True,usena=True)
encode_AG2(['C13','V314'], ['uid'], train_df=X_train, test_df=X_test)
encode_AG2(['V127','V136','V309','V307','V320'], ['uid'], train_df=X_train, test_df=X_test)
X_train['outsider15'] =(np.abs(X_train.D1-X_train.D15)>3 ).astype('int8')
X_test['outsider15'] =(np.abs(X_test.D1-X_test.D15)>3 ).astype('int8')
print('outsider15' )<drop_column> | list_test = glob(os.path.join(test_video_path, '*.mp4'))
print(f'Sum video in test: {len(list_test)}' ) | Deepfake Detection Challenge |
7,463,602 | cols = list(X_train.columns)
cols.remove('TransactionDT')
for c in ['D6','D7','D8','D9','D12','D13','D14']:
cols.remove(c)
for c in ['oof','DT_M','day','uid']:
cols.remove(c)
for c in ['C3','M5','id_08','id_33']:
cols.remove(c)
for c in ['card4','id_07','id_14','id_21','id_30','id_32','id_34']:
cols.remove(c)
for c in ['id_'+str(x)for x in range(22,28)]:
cols.remove(c )<prepare_x_and_y> | train_json = glob(os.path.join(train_video_path, '*.json'))
with open(train_json[0], 'rt')as file:
train = json.load(file)
train_df = pd.DataFrame()
train_df['file'] = train.keys()
label = [i['label'] for i in train.values() if isinstance(i, dict)]
train_df['label'] = label
split = [i['split'] for i in train.values() if isinstance(i, dict)]
train_df['split'] = split
original = [i['original'] for i in train.values() if isinstance(i, dict)]
train_df['original'] = original
train_df['original'] = train_df['original'].fillna(train_df['file'])
train_df.head() | Deepfake Detection Challenge |
7,463,602 | idxT = X_train.index[:3*len(X_train)//4]
idxV = X_train.index[3*len(X_train)//4:]
<train_model> | original_same = train_df.pivot_table(values=['file'], columns=['label'], index=['original'], fill_value=0, aggfunc='count')
original_same = original_same[(original_same[('file', 'FAKE')] != 0)&(original_same[('file', 'REAL')] != 0)]
print(f'Number of file having both FAKE and REAL: {len(original_same)}')
original_same | Deepfake Detection Challenge |
7,463,602 | if BUILD96:
clf = xgb.XGBClassifier(
n_estimators=2000,
max_depth=12,
learning_rate=0.02,
subsample=0.8,
colsample_bytree=0.4,
missing=-1,
eval_metric='auc',
tree_method='gpu_hist'
)
h = clf.fit(X_train.loc[idxT,cols], y_train[idxT],
eval_set=[(X_train.loc[idxV,cols],y_train[idxV])],
verbose=50, early_stopping_rounds=100 )<train_on_grid> | train_df['label'] = train_df['label'].apply(lambda x: 1 if x=='FAKE' else 0 ) | Deepfake Detection Challenge |
7,463,602 | if BUILD96:
oof = np.zeros(len(X_train))
preds = np.zeros(len(X_test))
skf = GroupKFold(n_splits=6)
for i,(idxT, idxV)in enumerate(skf.split(X_train, y_train, groups=X_train['DT_M'])) :
month = X_train.iloc[idxV]['DT_M'].iloc[0]
print('Fold',i,'withholding month',month)
print(' rows of train =',len(idxT),'rows of holdout =',len(idxV))
clf = xgb.XGBClassifier(
n_estimators=5000,
max_depth=12,
learning_rate=0.02,
subsample=0.8,
colsample_bytree=0.4,
missing=-1,
eval_metric='auc',
tree_method='gpu_hist'
)
h = clf.fit(X_train[cols].iloc[idxT], y_train.iloc[idxT],
eval_set=[(X_train[cols].iloc[idxV],y_train.iloc[idxV])],
verbose=100, early_stopping_rounds=200)
oof[idxV] += clf.predict_proba(X_train[cols].iloc[idxV])[:,1]
preds += clf.predict_proba(X_test[cols])[:,1]/skf.n_splits
del h, clf
x=gc.collect()
print()
print('XGB96 OOF CV=',roc_auc_score(y_train,oof))<save_to_csv> | def transfer(label):
if label==0:
return "real"
else:
return "fake"
def display_mtcnn(number_frame=3, number_video=2):
fake_real = original_same[(original_same[('file', 'FAKE')] == 1)&(original_same[('file', 'REAL')] == 1)].index.tolist()
original_images = random.sample(fake_real, number_video)
for original_image in original_images:
real_video = train_df[(train_df['label']==0)&(train_df['original']==original_image)]['file'].values[0]
fake_video = train_df[(train_df['label']==1)&(train_df['original']==original_image)]['file'].values[0]
if(real_video in os.listdir(train_video_path)) and(fake_video in os.listdir(train_video_path)) :
real_path = os.path.join(train_video_path, real_video)
fake_path = os.path.join(train_video_path, fake_video)
fig, axes = plt.subplots(number_frame, 2, figsize=(20, 20))
for ind, path in enumerate([real_path, fake_path]):
cap = cv2.VideoCapture(path)
frame_index = 0
ax_ix = 0
while True:
ret, frame = cap.read()
if cv2.waitKey(1)& 0xFF == 27:
break
if ret:
if frame_index%24==0:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = Image.fromarray(frame)
boxes, scores = box_mtcnn(frame, False)
box = boxes[scores.argmax() ]
frame_crop = frame.crop(box)
boxes, scores, landmarks = box_mtcnn(frame_crop)
if landmarks is not None:
landmark = landmarks[scores.argmax() ]
axes[ax_ix, ind].scatter(landmark[:, 0], landmark[:, 1], c='red', s=8)
axes[ax_ix, ind].imshow(frame_crop)
axes[ax_ix, ind].xaxis.set_visible(False)
axes[ax_ix, ind].yaxis.set_visible(False)
axes[ax_ix, ind].set_title(f'Frame: {frame_index}_{transfer(ind)}')
fig.tight_layout()
ax_ix += 1
if ax_ix == number_frame:
break
else:
break
frame_index+=1
fig.suptitle(original_image, color='b', size=20, y=1)
display_mtcnn(number_frame=3, number_video=3 ) | Deepfake Detection Challenge |
7,463,602 | if BUILD96:
plt.hist(oof,bins=100)
plt.ylim(( 0,5000))
plt.title('XGB OOF')
plt.show()
X_train['oof'] = oof
X_train.reset_index(inplace=True)
X_train[['TransactionID','oof']].to_csv('oof_xgb_96.csv')
X_train.set_index('TransactionID',drop=True,inplace=True )<save_to_csv> | class VideoDataset(Dataset):
def __init__(self, df, path_video, num_frame=5, is_train=True):
super(VideoDataset, self ).__init__()
self.df = df
self.num_frame = num_frame
self.is_train = is_train
self.path_video = path_video
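# pre-scan every video and keep only the rows whose MTCNN landmark extraction succeeds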
index_list = deque()
for index in tqdm(range(len(self.df))):
video_name = self.df.loc[index, 'file']
video_path = os.path.join(self.path_video, video_name)
if self.landmark_mtcnn(video_path)is not None:
index_list.append(index)
index_list = list(index_list)
self.df = self.df[self.df.index.isin(index_list)]
self.df.reset_index(inplace=True, drop=True)
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
video_name = self.df.loc[idx, 'file']
video_path = os.path.join(self.path_video, video_name)
list_landmark = self.landmark_mtcnn(video_path)
if self.is_train:
label = self.df.loc[idx, 'label']
return torch.from_numpy(list_landmark), torch.tensor(label, dtype=torch.float)
else:
return video_name, torch.from_numpy(list_landmark)
def landmark_mtcnn(self, video_path):
cap = cv2.VideoCapture(video_path)
frame_index = 0
list_landmark = deque()
while len(list_landmark)< 10*self.num_frame:
ret, frame = cap.read()
if cv2.waitKey(1)& 0xFF == 27:
break
if ret:
if frame_index % 24 == 0:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = Image.fromarray(frame)
boxes, scores, landmarks = box_mtcnn(frame)
if scores[0]:
index_max = np.argmax(scores)
landmark = landmarks[index_max]
list_landmark.extend(landmark.flatten())
else:
break
frame_index+=1
list_landmark = list(list_landmark)
if len(list_landmark)== 10*self.num_frame:
list_landmark = np.array(list_landmark ).reshape(self.num_frame, 10)
return list_landmark
return None
dataset = VideoDataset(train_df, train_video_path ) | Deepfake Detection Challenge |
7,463,602 | if BUILD96:
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
sample_submission.isFraud = preds
sample_submission.to_csv('sub_xgb_96.csv',index=False)
plt.hist(sample_submission.isFraud,bins=100)
plt.ylim(( 0,5000))
plt.title('XGB96 Submission')
plt.show()<load_from_csv> | test_size = 0.2
index_split = int(len(dataset)*test_size)
list_index =(list(range(len(dataset))))
random.shuffle(list_index)
train_idx = list_index[index_split:]
val_idx = list_index[:index_split]
train_dataset = Subset(dataset, train_idx)
val_dataset = Subset(dataset, val_idx)
train_ld = DataLoader(train_dataset, batch_size=8, shuffle=True)
val_ld = DataLoader(val_dataset, batch_size=8, shuffle=True ) | Deepfake Detection Challenge |
7,463,602 | X_test['isFraud'] = sample_submission.isFraud.values
X_train['isFraud'] = y_train.values
comb = pd.concat([X_train[['isFraud']],X_test[['isFraud']]],axis=0)
uids = pd.read_csv('/kaggle/input/ieee-submissions-and-uids/uids_v4_no_multiuid_cleaning.csv', usecols=['TransactionID','uid']).rename({'uid':'uid2'}, axis=1)
comb = comb.merge(uids,on='TransactionID',how='left')
mp = comb.groupby('uid2' ).isFraud.agg(['mean'])
comb.loc[comb.uid2>0,'isFraud'] = comb.loc[comb.uid2>0].uid2.map(mp['mean'])
uids = pd.read_csv('/kaggle/input/ieee-submissions-and-uids/uids_v1_no_multiuid_cleaning.csv',usecols=['TransactionID','uid'] ).rename({'uid':'uid3'},axis=1)
comb = comb.merge(uids,on='TransactionID',how='left')
mp = comb.groupby('uid3' ).isFraud.agg(['mean'])
comb.loc[comb.uid3>0,'isFraud'] = comb.loc[comb.uid3>0].uid3.map(mp['mean'])
sample_submission.isFraud = comb.iloc[len(X_train):].isFraud.values
sample_submission.to_csv('sub_xgb_96_PP.csv',index=False )<import_modules> | class swish(Module):
def __init__(self):
super(swish, self ).__init__()
self.sigmoid = nn.Sigmoid()
def forward(self, x):
return x*self.sigmoid(x)
class small_model(Module):
def __init__(self, num_class=1):
super(small_model, self ).__init__()
self.conv = nn.Sequential(nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(64),
nn.Dropout(0.2))
self.fc = nn.Sequential(nn.Linear(64*10*5, 128),
nn.ReLU(inplace=True),
nn.BatchNorm1d(128),
nn.Dropout(0.2),
nn.Linear(128, num_class),
nn.Sigmoid())
def forward(self, x):
x = self.conv(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
model = small_model().to(device)
model.eval() | Deepfake Detection Challenge |
7,463,602 | <import_modules><EOS> | class Trainer(object):
def __init__(self, model):
self.model = model
self.creation = nn.MSELoss()
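# discriminative learning rates: a smaller rate for the conv layers than for the freshly initialized FC head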
self.optimizer = optim.AdamW([
{'params': model.conv.parameters() , 'lr': 1e-4},
{'params': model.fc.parameters() , 'lr': 1e-3}], lr=0.001)
self.scheduler = ReduceLROnPlateau(self.optimizer, mode='min', factor=0.15)
def train_process(self, train_ld, val_ld, epochs):
score_max = 0
check_step = 0
loss_min = 1
check_number = 13
self.model.train()
for epoch in range(epochs):
train_loss, val_loss = 0, 0
for crop, label in tqdm(train_ld):
crop = crop.unsqueeze(1)
crop, label = crop.float().to(device), label.to(device)
self.optimizer.zero_grad()
output = self.model(crop ).squeeze(1)
loss = self.creation(output, label)
loss.backward()
self.optimizer.step()
self.scheduler.step(train_loss)
train_loss += loss.item()
del crop, label
train_loss = train_loss/len(train_ld)
torch.cuda.empty_cache()
gc.collect()
self.model.eval()
val_score = 0
with torch.no_grad() :
for crop, label in tqdm(val_ld):
crop = crop.unsqueeze(1)
crop, label = crop.float().to(device), label.to(device)
output = self.model(crop ).squeeze(1)
loss = self.creation(output, label)
val_loss += loss.item()
val_score += torch.sum(( output>0.5 ).float() == label ).item() /len(label)
val_loss = val_loss/len(val_ld)
val_score = val_score/len(val_ld)
self.scheduler.step(val_loss)
if val_score > score_max:
print(f'Epoch: {epoch}, train loss: {train_loss:.5f}, val_loss: {val_loss:.5f}. Validation score increased from {score_max:.5f} to {val_score:.5f}')
score_max = val_score
loss_min = val_loss
torch.save(self.model.state_dict() , 'model.pth')
print('Saving model!')
check_step = 0
elif val_score == score_max:
if val_loss < loss_min:
print(f'Epoch: {epoch}, train loss: {train_loss:.5f}, val_loss: {val_loss:.5f}, val_score: {val_score:.5f}. Validation loss decreased from {loss_min:.5f} to {val_loss:.5f}')
loss_min = val_loss
torch.save(self.model.state_dict() , 'model.pth')
print('Saving model!')
check_step = 0
else:
check_step += 1
print(f'Epoch: {epoch}, train loss: {train_loss:.5f}, val_loss: {val_loss:.5f}, val_score: {val_score:.5f}. Model did not improve for {check_step} step(s)')
if check_step > check_number:
print('Stop training!')
break
else:
check_step += 1
print(f'Epoch: {epoch}, train loss: {train_loss:.5f}, val_loss: {val_loss:.5f}. Validation score has not increased from {val_score:.5f} in {check_step} step(s)')
if check_step > check_number:
print('Stop training!')
break
trainer = Trainer(model)
trainer.train_process(train_ld=train_ld, val_ld=val_ld, epochs=20 ) | Deepfake Detection Challenge |
17,910,828 | model.train_model(shuffled_training,
acc=sklearn.metrics.accuracy_score,
f1=sklearn.metrics.f1_score )<predict_on_test> | %matplotlib inline
warnings.filterwarnings('ignore')
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 200 ) | Home Credit Default Risk |
17,910,828 | result, model_outputs, wrong_predictions = model.eval_model(shuffled_training,
acc=sklearn.metrics.accuracy_score,
f1=sklearn.metrics.f1_score )<compute_test_metric> | app_train = pd.read_csv('.. /input/home-credit-default-risk/application_train.csv')
app_test = pd.read_csv('.. /input/home-credit-default-risk/application_test.csv' ) | Home Credit Default Risk |
17,910,828 | result<predict_on_test> | app_train.isnull().sum() | Home Credit Default Risk |
17,910,828 | predictions, raw_outputs = model.predict(test["text"].to_list() )<create_dataframe> | app_train['TARGET'].value_counts() | Home Credit Default Risk |
17,910,828 | mypreds = pd.DataFrame(test[["id"]])
mypreds["target"] = predictions<save_to_csv> | columns = ['AMT_INCOME_TOTAL','AMT_CREDIT', 'AMT_ANNUITY', 'AMT_GOODS_PRICE', 'DAYS_BIRTH', 'DAYS_EMPLOYED', 'DAYS_ID_PUBLISH',
'DAYS_REGISTRATION', 'DAYS_LAST_PHONE_CHANGE', 'CNT_FAM_MEMBERS', 'REGION_RATING_CLIENT', 'EXT_SOURCE_1',
'EXT_SOURCE_2', 'EXT_SOURCE_3', 'AMT_REQ_CREDIT_BUREAU_HOUR', 'AMT_REQ_CREDIT_BUREAU_DAY', 'AMT_REQ_CREDIT_BUREAU_WEEK',
'AMT_REQ_CREDIT_BUREAU_MON', 'AMT_REQ_CREDIT_BUREAU_QRT', 'AMT_REQ_CREDIT_BUREAU_YEAR']
show_hist_by_target(app_train, columns ) | Home Credit Default Risk |
17,910,828 | mypreds.to_csv("submission.csv", index=False )<install_modules> | app_train['DAYS_BIRTH']=abs(app_train['DAYS_BIRTH'])
app_train['DAYS_BIRTH'].corr(app_train['TARGET'] ) | Home Credit Default Risk |
17,910,828 | ! python -m pip install tf-models-nightly --no-deps -q
! python -m pip install tf-models-official==2.4.0 -q
! python -m pip install tensorflow-gpu==2.4.1 -q
! python -m pip install tensorflow-text==2.4.1 -q
! python -m spacy download en_core_web_sm -q
! python -m spacy validate<import_modules> | app_train[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].isnull().sum() | Home Credit Default Risk |
17,910,828 | print(f'TensorFlow Version: {tf.__version__}')
print(f'Python Version: {python_version() }' )<set_options> | app_train['EXT_SOURCE_3'].value_counts(dropna=False ) | Home Credit Default Risk |
17,910,828 | RANDOM_SEED = 123
nlp = spacy.load('en_core_web_sm')
pd.set_option('display.max_colwidth', None)
rcParams['figure.figsize'] =(10, 6)
sns.set_theme(palette='muted', style='whitegrid' )<load_from_csv> | cond_1 =(app_train['TARGET'] == 1)
cond_0 =(app_train['TARGET'] == 0)
print(app_train['CODE_GENDER'].value_counts() / app_train.shape[0])
print('\nDelinquent cases\n', app_train[cond_1]['CODE_GENDER'].value_counts() / app_train[cond_1].shape[0])
print('\nNon-delinquent cases\n', app_train[cond_0]['CODE_GENDER'].value_counts() / app_train[cond_0].shape[0]) | Home Credit Default Risk |
17,910,828 | path = '../input/nlp-getting-started/train.csv'
df = pd.read_csv(path)
print(df.shape)
df.head()<load_from_csv> | app_train = pd.read_csv('../input/home-credit-default-risk/application_train.csv')
app_test = pd.read_csv('../input/home-credit-default-risk/application_test.csv') | Home Credit Default Risk |
17,910,828 | path_test = '../input/nlp-getting-started/test.csv'
df_test = pd.read_csv(path_test)
print(df_test.shape)
df_test.head()<count_duplicates> | apps = pd.concat([app_train, app_test])
print(apps.shape ) | Home Credit Default Risk |
17,910,828 | duplicates = df[df.duplicated(['text', 'target'], keep=False)]
print(f'Train Duplicate Entries(text, target): {len(duplicates)}')
duplicates.head()<remove_duplicates> | def get_apps_processed(apps):
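# hand-crafted ratio features: external-score aggregates plus annuity/credit/goods/income and employment/age ratios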
apps['APPS_EXT_SOURCE_MEAN'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
apps['APPS_EXT_SOURCE_STD'] = apps[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
apps['APPS_EXT_SOURCE_STD'] = apps['APPS_EXT_SOURCE_STD'].fillna(apps['APPS_EXT_SOURCE_STD'].mean())
apps['APPS_ANNUITY_CREDIT_RATIO'] = apps['AMT_ANNUITY']/apps['AMT_CREDIT']
apps['APPS_GOODS_CREDIT_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_CREDIT']
apps['APPS_ANNUITY_INCOME_RATIO'] = apps['AMT_ANNUITY']/apps['AMT_INCOME_TOTAL']
apps['APPS_CREDIT_INCOME_RATIO'] = apps['AMT_CREDIT']/apps['AMT_INCOME_TOTAL']
apps['APPS_GOODS_INCOME_RATIO'] = apps['AMT_GOODS_PRICE']/apps['AMT_INCOME_TOTAL']
apps['APPS_CNT_FAM_INCOME_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['CNT_FAM_MEMBERS']
apps['APPS_EMPLOYED_BIRTH_RATIO'] = apps['DAYS_EMPLOYED']/apps['DAYS_BIRTH']
apps['APPS_INCOME_EMPLOYED_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_EMPLOYED']
apps['APPS_INCOME_BIRTH_RATIO'] = apps['AMT_INCOME_TOTAL']/apps['DAYS_BIRTH']
apps['APPS_CAR_BIRTH_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_BIRTH']
apps['APPS_CAR_EMPLOYED_RATIO'] = apps['OWN_CAR_AGE'] / apps['DAYS_EMPLOYED']
return apps | Home Credit Default Risk |
17,910,828 | df.drop_duplicates(['text', 'target'], inplace=True, ignore_index=True)
print(df.shape, df_test.shape )<count_duplicates> | prev_app = pd.read_csv('.. /input/home-credit-default-risk/previous_application.csv')
print(prev_app.shape, apps.shape ) | Home Credit Default Risk |
17,910,828 | new_duplicates = df[df.duplicated(['keyword', 'text'], keep=False)]
print(f'Train Duplicate Entries(keyword, text): {len(new_duplicates)}')
new_duplicates[['text', 'target']].sort_values(by='text' )<drop_column> | prev_app_outer = prev_app.merge(apps['SK_ID_CURR'], on='SK_ID_CURR', how='outer', indicator=True ) | Home Credit Default Risk |
17,910,828 | df.drop([4253, 4193, 2802, 4554, 4182, 3212, 4249, 4259, 6535, 4319, 4239, 606, 3936, 6018, 5573], inplace=True )<drop_column> | prev_app_outer['_merge'].value_counts() | Home Credit Default Risk |
17,910,828 | df = df.reset_index(drop=True)
df<count_values> | def missing_data(data):
total = data.isnull().sum().sort_values(ascending = False)
percent =(data.isnull().sum() /data.isnull().count() *100 ).sort_values(ascending = False)
return pd.concat([total, percent], axis=1, keys=['Total', 'Percent'] ) | Home Credit Default Risk |
17,910,828 | df['target'].value_counts() / len(df )<count_missing_values> | prev_app.groupby('SK_ID_CURR' ).count() | Home Credit Default Risk |
17,910,828 | def null_table(data):
null_list = []
for i in data:
if data[i].notnull().any() :
null_list.append(data[i].notnull().value_counts())
return pd.DataFrame(pd.concat(null_list, axis=1 ).T )<count_missing_values> | prev_app.groupby('SK_ID_CURR')['SK_ID_CURR'].count() | Home Credit Default Risk |
17,910,828 | null_table(df )<count_missing_values> | app_prev_target = prev_app.merge(app_train[['SK_ID_CURR', 'TARGET']], on='SK_ID_CURR', how='left')
app_prev_target.shape | Home Credit Default Risk |
17,910,828 | null_table(df_test )<define_variables> | num_columns = [column for column in num_columns if column not in ['SK_ID_PREV', 'SK_ID_CURR', 'TARGET']]
num_columns | Home Credit Default Risk |
17,910,828 | text = df['text']
target = df['target']
test_text = df_test['text']
for i in np.random.randint(500, size=5):
print(f'Tweet: {text[i]}' + '\n' * 2)<define_variables> | app_prev_target.TARGET.value_counts() | Home Credit Default Risk |
17,910,828 | lookup_dict = {
'abt' : 'about',
'afaik' : 'as far as i know',
'bc' : 'because',
'bfn' : 'bye for now',
'bgd' : 'background',
'bh' : 'blockhead',
'br' : 'best regards',
'btw' : 'by the way',
'cc': 'carbon copy',
'chk' : 'check',
'dam' : 'do not annoy me',
'dd' : 'dear daughter',
'df': 'dear fiance',
'ds' : 'dear son',
'dyk' : 'did you know',
'em': 'email',
'ema' : 'email address',
'ftf' : 'face to face',
'fb' : 'facebook',
'ff' : 'follow friday',
'fotd' : 'find of the day',
'ftw': 'for the win',
'fwiw' : 'for what it is worth',
'gts' : 'guess the song',
'hagn' : 'have a good night',
'hand' : 'have a nice day',
'hotd' : 'headline of the day',
'ht' : 'heard through',
'hth' : 'hope that helps',
'ic' : 'i see',
'icymi' : 'in case you missed it',
'idk' : 'i do not know',
'ig': 'instagram',
'iirc' : 'if i remember correctly',
'imho' : 'in my humble opinion',
'imo' : 'in my opinion',
'irl' : 'in real life',
'iwsn' : 'i want sex now',
'jk' : 'just kidding',
'jsyk' : 'just so you know',
'jv' : 'joint venture',
'kk' : 'cool cool',
'kyso' : 'knock your socks off',
'lmao' : 'laugh my ass off',
'lmk' : 'let me know',
'lo' : 'little one',
'lol' : 'laugh out loud',
'mm' : 'music monday',
'mirl' : 'meet in real life',
'mrjn' : 'marijuana',
'nbd' : 'no big deal',
'nct' : 'nobody cares though',
'njoy' : 'enjoy',
'nsfw' : 'not safe for work',
'nts' : 'note to self',
'oh' : 'overheard',
'omg': 'oh my god',
'oomf' : 'one of my friends',
'orly' : 'oh really',
'plmk' : 'please let me know',
'pnp' : 'party and play',
'qotd' : 'quote of the day',
're' : 'in reply to in regards to',
'rtq' : 'read the question',
'rt' : 'retweet',
'sfw' : 'safe for work',
'smdh' : 'shaking my damn head',
'smh' : 'shaking my head',
'so' : 'significant other',
'srs' : 'serious',
'tftf' : 'thanks for the follow',
'tftt' : 'thanks for this tweet',
'tj' : 'tweetjack',
'tl' : 'timeline',
'tldr' : 'too long did not read',
'tmb' : 'tweet me back',
'tt' : 'trending topic',
'ty' : 'thank you',
'tyia' : 'thank you in advance',
'tyt' : 'take your time',
'tyvw' : 'thank you very much',
'w': 'with',
'wtv' : 'whatever',
'ygtr' : 'you got that right',
'ykwim' : 'you know what i mean',
'ykyat' : 'you know you are addicted to',
'ymmv' : 'your mileage may vary',
'yolo' : 'you only live once',
'yoyo' : 'you are on your own',
'yt': 'youtube',
'yw' : 'you are welcome',
'zomg' : 'oh my god to the maximum'
}<string_transform> | print(app_prev_target.groupby('TARGET' ).agg({'AMT_ANNUITY': ['mean', 'median', 'count']}))
print(app_prev_target.groupby('TARGET' ).agg({'AMT_APPLICATION': ['mean', 'median', 'count']}))
print(app_prev_target.groupby('TARGET' ).agg({'AMT_CREDIT': ['mean', 'median', 'count']})) | Home Credit Default Risk |
17,910,828 | def lemmatize_text(text, nlp=nlp):
doc = nlp(text)
lemma_sent = [i.lemma_ for i in doc if not i.is_stop]
return ' '.join(lemma_sent)
def abbrev_conversion(text):
words = text.split()
abbrevs_removed = []
for i in words:
if i in lookup_dict:
i = lookup_dict[i]
abbrevs_removed.append(i)
return ' '.join(abbrevs_removed)
def standardize_text(text_data):
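# cleaning pipeline: strip URLs, HTML, retweet markers, emojis, mentions, punctuation and digits; expand abbreviations; lemmatize and drop stopwords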
entity_pattern = re.compile(r'(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)')
url_pattern = re.compile(r'(?:\@|http?\://|https?\://|www)\S+')
retweet_pattern = re.compile(r'^(RT|RT:)\s+')
digit_pattern = re.compile(r'[\d]+')
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F"
u"\U0001F300-\U0001F5FF"
u"\U0001F680-\U0001F6FF"
u"\U0001F1E0-\U0001F1FF"
u"\U00002500-\U00002BEF"
u"\U00002702-\U000027B0"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
u"\U0001f926-\U0001f937"
u"\U00010000-\U0010ffff"
u"\u2640-\u2642"
u"\u2600-\u2B55"
u"\u200d"
u"\u23cf"
u"\u23e9"
u"\u231a"
u"\ufe0f"
u"\u3030"
"]+", flags=re.UNICODE)
url_strip = text_data.apply(lambda x: re.sub(url_pattern, '', x)if pd.isna(x)!= True else x)
html_parse = url_strip.apply(lambda x: bs(x, 'html.parser' ).get_text() if pd.isna(x)!= True else x)
retweet_strip = html_parse.apply(lambda x: re.sub(retweet_pattern, '', x)if pd.isna(x)!= True else x)
emoji_strip = retweet_strip.apply(lambda x: re.sub(emoji_pattern, '', x)if pd.isna(x)!= True else x)
entity_strip = emoji_strip.apply(lambda x: re.sub(entity_pattern, '', x)if pd.isna(x)!= True else x)
lowercase = entity_strip.apply(lambda x: str.lower(x)if pd.isna(x)!= True else x)
punct_strip = lowercase.apply(lambda x: re.sub(f'[{re.escape(string.punctuation)}]', '', x)if pd.isna(x)!= True else x)
abbrev_converted = punct_strip.apply(lambda x: abbrev_conversion(x)if pd.isna(x)!= True else x)
digit_strip = abbrev_converted.apply(lambda x: re.sub(digit_pattern, '', x)if pd.isna(x)!= True else x)
lemma_and_stop = digit_strip.apply(lambda x: lemmatize_text(x)if pd.isna(x)!= True else x)
return lemma_and_stop<create_dataframe> | app_prev_target.groupby(['NAME_CONTRACT_TYPE', 'NAME_GOODS_CATEGORY'] ).agg({'AMT_ANNUITY': ['mean', 'median', 'count', 'max']} ) | Home Credit Default Risk |
17,910,828 | df['clean_text'] = pd.DataFrame(clean_text)
df_test['clean_text'] = pd.DataFrame(test_clean_text )<feature_engineering> | prev_app.groupby('SK_ID_CURR' ) | Home Credit Default Risk |
17,910,828 | df['clean_text'] = df['clean_text'].apply(lambda x: re.sub(pattern_new, '', x)if pd.isna(x)!= True else x)
df_test['clean_text'] = df_test['clean_text'].apply(lambda x: re.sub(pattern_new, '', x)if pd.isna(x)!= True else x )<count_values> | prev_group = prev_app.groupby('SK_ID_CURR')
prev_group.head() | Home Credit Default Risk |
17,910,828 | print('Training counts of "new": ', len(re.findall(pattern_new, ' '.join(df['clean_text']))))
print('Test counts of "new": ', len(re.findall(pattern_new, ' '.join(df_test['clean_text']))))<load_pretrained> | prev_agg = pd.DataFrame()
prev_agg['CNT'] = prev_group['SK_ID_CURR'].count()
prev_agg.head() | Home Credit Default Risk |
17,910,828 | sentence_enc = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4' )<string_transform> | prev_agg['AVG_CREDIT'] = prev_group['AMT_CREDIT'].mean()
prev_agg['MAX_CREDIT'] = prev_group['AMT_CREDIT'].max()
prev_agg['MIN_CREDIT'] = prev_group['AMT_CREDIT'].min()
prev_agg.head() | Home Credit Default Risk |
17,910,828 | def extract_keywords(text, nlp=nlp):
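# rank candidate keywords (adjectives, nouns, proper nouns) by cosine similarity between their sentence-encoder embeddings and the whole document's embedding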
potential_keywords = []
TOP_KEYWORD = -1
pos_tag = ['ADJ', 'NOUN', 'PROPN']
doc = nlp(text)
for i in doc:
if i.pos_ in pos_tag:
potential_keywords.append(i.text)
document_embed = sentence_enc([text])
potential_embed = sentence_enc(potential_keywords)
vector_distances = cosine_similarity(document_embed, potential_embed)
keyword = [potential_keywords[i] for i in vector_distances.argsort() [0][TOP_KEYWORD:]]
return keyword
def keyword_filler(keyword, text):
if pd.isnull(keyword):
try:
keyword = extract_keywords(text)[0]
except:
keyword = ''
return keyword<data_type_conversions> | prev_group = prev_app.groupby('SK_ID_CURR')
prev_agg1 = prev_group['AMT_CREDIT'].agg(['mean', 'max', 'min'])
prev_agg2 = prev_group['AMT_ANNUITY'].agg(['mean', 'max', 'min'])
prev_agg = prev_agg1.merge(prev_agg2, on='SK_ID_CURR', how='inner')
prev_agg.head() | Home Credit Default Risk |
17,910,828 | df['keyword_fill'] = pd.DataFrame(list(map(keyword_filler, df['keyword'], df['clean_text'])) ).astype(str)
df_test['keyword_fill'] = pd.DataFrame(list(map(keyword_filler, df_test['keyword'], df_test['clean_text'])) ).astype(str)
print('Null Training Keywords => ', df['keyword_fill'].isnull().any())
print('Null Test Keywords => ', df_test['keyword_fill'].isnull().any() )<create_dataframe> | prev_app['PREV_CREDIT_DIFF'] = prev_app['AMT_APPLICATION'] - prev_app['AMT_CREDIT']
prev_app['PREV_GOODS_DIFF'] = prev_app['AMT_APPLICATION'] - prev_app['AMT_GOODS_PRICE']
prev_app['PREV_CREDIT_APPL_RATIO'] = prev_app['AMT_CREDIT']/prev_app['AMT_APPLICATION']
prev_app['PREV_ANNUITY_APPL_RATIO'] = prev_app['AMT_ANNUITY']/prev_app['AMT_APPLICATION']
prev_app['PREV_GOODS_APPL_RATIO'] = prev_app['AMT_GOODS_PRICE']/prev_app['AMT_APPLICATION'] | Home Credit Default Risk |
17,910,828 | df['keyword_fill'] = pd.DataFrame(standardize_text(df['keyword_fill']))
df_test['keyword_fill'] = pd.DataFrame(standardize_text(df_test['keyword_fill']))<count_values> | prev_app['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace=True)
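# 365243 is the dataset's sentinel for a missing date in the DAYS_* columns; convert it to NaN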
prev_app['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev_app['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev_app['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev_app['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
prev_app['PREV_DAYS_LAST_DUE_DIFF'] = prev_app['DAYS_LAST_DUE_1ST_VERSION'] - prev_app['DAYS_LAST_DUE'] | Home Credit Default Risk |
17,910,828 | keyword_count_0 = pd.DataFrame(df['keyword_fill'][df['target']==0].value_counts().reset_index())
keyword_count_1 = pd.DataFrame(df['keyword_fill'][df['target']==1].value_counts().reset_index() )<define_variables> | all_pay = prev_app['AMT_ANNUITY'] * prev_app['CNT_PAYMENT']
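# total repayment minus principal, spread over CNT_PAYMENT installments, approximates a simple per-installment interest rate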
prev_app['PREV_INTERESTS_RATE'] =(all_pay/prev_app['AMT_CREDIT'] - 1)/prev_app['CNT_PAYMENT'] | Home Credit Default Risk |
17,910,828 | train_features = df[['clean_text','keyword_fill']]
test_features = df_test[['clean_text', 'keyword_fill']]<split> | agg_dict = {
    'SK_ID_CURR': ['count'],
    'AMT_CREDIT': ['mean', 'max', 'sum'],
    'AMT_ANNUITY': ['mean', 'max', 'sum'],
    'AMT_APPLICATION': ['mean', 'max', 'sum'],
    'AMT_DOWN_PAYMENT': ['mean', 'max', 'sum'],
    'AMT_GOODS_PRICE': ['mean', 'max', 'sum'],
    'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],
    'DAYS_DECISION': ['min', 'max', 'mean'],
    'CNT_PAYMENT': ['mean', 'sum'],
    'PREV_CREDIT_DIFF': ['mean', 'max', 'sum'],
    'PREV_CREDIT_APPL_RATIO': ['mean', 'max'],
    'PREV_GOODS_DIFF': ['mean', 'max', 'sum'],
    'PREV_GOODS_APPL_RATIO': ['mean', 'max'],
    'PREV_DAYS_LAST_DUE_DIFF': ['mean', 'max', 'sum'],
    'PREV_INTERESTS_RATE': ['mean', 'max']
} | Home Credit Default Risk |
17,910,828 | train_x, val_x, train_y, val_y = train_test_split(
    train_features,
    target,
    test_size=0.2,
    random_state=RANDOM_SEED,
)
print(train_x.shape)
print(train_y.shape)
print(val_x.shape)
print(val_y.shape)<create_dataframe> | prev_group = prev_app.groupby('SK_ID_CURR')
prev_amt_agg = prev_group.agg(agg_dict)
prev_amt_agg.columns = ['PREV_' + '_'.join(column).upper() for column in prev_amt_agg.columns.ravel()] | Home Credit Default Risk |
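groupby().agg(dict) returns MultiIndex columns, which the comprehension above flattens into single strings; a toy illustration:

import pandas as pd

toy = pd.DataFrame({'SK_ID_CURR': [1, 1, 2], 'AMT_CREDIT': [10.0, 30.0, 5.0]})
agg = toy.groupby('SK_ID_CURR').agg({'AMT_CREDIT': ['mean', 'max']})
# ('AMT_CREDIT', 'mean') becomes PREV_AMT_CREDIT_MEAN, and so on
agg.columns = ['PREV_' + '_'.join(c).upper() for c in agg.columns.ravel()]
print(agg.columns.tolist())  # ['PREV_AMT_CREDIT_MEAN', 'PREV_AMT_CREDIT_MAX']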
17,910,828 | train_ds = tf.data.Dataset.from_tensor_slices((dict(train_x), train_y))
val_ds = tf.data.Dataset.from_tensor_slices((dict(val_x), val_y))
test_ds = tf.data.Dataset.from_tensor_slices(dict(test_features))<categorify> | prev_app['NAME_CONTRACT_STATUS'].value_counts() | Home Credit Default Risk |
17,910,828 | AUTOTUNE = tf.data.experimental.AUTOTUNE
BUFFER_SIZE = 1000
BATCH_SIZE = 32
def configure_dataset(dataset, shuffle=False, test=False):
    # cache before shuffling so the source is read only once; prefetch
    # overlaps input preparation with training
    if shuffle:
        dataset = dataset.cache() \
            .shuffle(BUFFER_SIZE, seed=RANDOM_SEED, reshuffle_each_iteration=True) \
            .batch(BATCH_SIZE, drop_remainder=True).prefetch(AUTOTUNE)
    elif test:
        # inference: keep the ragged final batch so every row is scored
        dataset = dataset.cache() \
            .batch(BATCH_SIZE, drop_remainder=False).prefetch(AUTOTUNE)
    else:
        dataset = dataset.cache() \
            .batch(BATCH_SIZE, drop_remainder=True).prefetch(AUTOTUNE)
    return dataset<prepare_x_and_y> | prev_refused_agg = prev_refused.groupby('SK_ID_CURR')['SK_ID_CURR'].count()
prev_refused_agg.shape, prev_amt_agg.shape | Home Credit Default Risk |
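A toy run of configure_dataset on synthetic tensors, assuming the helper and the BATCH_SIZE/RANDOM_SEED globals above are in scope; the shapes show one shuffled training batch:

import tensorflow as tf

toy = tf.data.Dataset.from_tensor_slices((tf.range(100), tf.range(100)))
toy = configure_dataset(toy, shuffle=True)
for features, labels in toy.take(1):
    print(features.shape, labels.shape)  # (32,) (32,)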
17,910,828 | train_ds = configure_dataset(train_ds, shuffle=True)
val_ds = configure_dataset(val_ds)
test_ds = configure_dataset(test_ds, test=True)<categorify> | pd.DataFrame(prev_refused_agg) | Home Credit Default Risk |
17,910,828 | bert_preprocessor = hub.KerasLayer('https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', name='BERT_preprocesser')
bert_encoder = hub.KerasLayer('https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4', trainable=True, name='BERT_encoder')
nnlm_embed = hub.KerasLayer('https://tfhub.dev/google/nnlm-en-dim50/2', name='embedding_layer')<categorify> | prev_refused_agg.reset_index(name='PREV_REFUSED_COUNT') | Home Credit Default Risk |
17,910,828 | def build_model():
    # text branch: BERT preprocessing + encoder on the cleaned tweet
    text_input = layers.Input(shape=(), dtype=tf.string, name='clean_text')
    encoder_inputs = bert_preprocessor(text_input)
    encoder_outputs = bert_encoder(encoder_inputs)
    pooled_output = encoder_outputs["pooled_output"]
    bert_dropout = layers.Dropout(0.1, name='BERT_dropout')(pooled_output)
    # keyword branch: lightweight NNLM embedding + dense projection
    key_input = layers.Input(shape=(), dtype=tf.string, name='keyword_fill')
    key_embed = nnlm_embed(key_input)
    key_flat = layers.Flatten()(key_embed)
    key_dense = layers.Dense(128, activation='elu', kernel_regularizer=regularizers.l2(1e-4))(key_flat)
    key_dropout = layers.Dropout(0.5, name='dense_dropout')(key_dense)
    # merge both branches and classify
    merge = layers.concatenate([bert_dropout, key_dropout])
    dense = layers.Dense(128, activation='elu', kernel_regularizer=regularizers.l2(1e-4))(merge)
    dropout = layers.Dropout(0.5, name='merged_dropout')(dense)
    clf = layers.Dense(1, activation='sigmoid', name='classifier')(dropout)
    return Model([text_input, key_input], clf, name='BERT_classifier')<init_hyperparams> | prev_refused_agg = prev_refused_agg.reset_index(name='PREV_REFUSED_COUNT')
prev_amt_agg = prev_amt_agg.reset_index()
prev_amt_refused_agg = prev_amt_agg.merge(prev_refused_agg, on='SK_ID_CURR', how='left')
prev_amt_refused_agg.head(10) | Home Credit Default Risk |
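The compile/fit cell below references bert_classifier, which the original notebook presumably instantiates in a cell not shown here; the assumed step is simply:

bert_classifier = build_model()  # assumed instantiation; the name matches the cells below
bert_classifier.summary()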
17,910,828 | EPOCHS = 2
LEARNING_RATE = 5e-5
STEPS_PER_EPOCH = int(train_ds.unbatch().cardinality().numpy() / BATCH_SIZE)
VAL_STEPS = int(val_ds.unbatch().cardinality().numpy() / BATCH_SIZE)
TRAIN_STEPS = STEPS_PER_EPOCH * EPOCHS
WARMUP_STEPS = int(TRAIN_STEPS * 0.1)
adamw_optimizer = create_optimizer(
    init_lr=LEARNING_RATE,
    num_train_steps=TRAIN_STEPS,
    num_warmup_steps=WARMUP_STEPS
)<train_model> | prev_amt_refused_agg['PREV_REFUSED_COUNT'].value_counts(dropna=False) | Home Credit Default Risk |
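create_optimizer is commonly documented to pair AdamW with linear warmup followed by linear decay; a rough sketch of that schedule's shape (an illustration, not the library's exact implementation):

def lr_at(step, init_lr=5e-5, train_steps=1000, warmup_steps=100):
    if step < warmup_steps:
        return init_lr * step / warmup_steps  # ramp up from zero
    # decay linearly to zero over the remaining steps
    return init_lr * max((train_steps - step) / (train_steps - warmup_steps), 0.0)

print(lr_at(50), lr_at(100), lr_at(1000))  # 2.5e-05 5e-05 0.0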
17,910,828 | bert_classifier.compile(
    # the classifier head ends in a sigmoid, so the loss receives
    # probabilities rather than logits
    loss=BinaryCrossentropy(from_logits=False),
    optimizer=adamw_optimizer,
    metrics=[BinaryAccuracy(name='accuracy')]
)
history = bert_classifier.fit(
    train_ds,
    epochs=EPOCHS,
    steps_per_epoch=STEPS_PER_EPOCH,
    validation_data=val_ds,
    validation_steps=VAL_STEPS
)<find_best_params> | prev_amt_refused_agg = prev_amt_refused_agg.fillna(0)
prev_amt_refused_agg['PREV_REFUSE_RATIO'] = prev_amt_refused_agg['PREV_REFUSED_COUNT'] / prev_amt_refused_agg['PREV_SK_ID_CURR_COUNT']
prev_amt_refused_agg.head(10) | Home Credit Default Risk |
17,910,828 | train_loss = history.history['loss']
val_loss = history.history['val_loss']
train_acc = history.history['accuracy']
val_acc = history.history['val_accuracy']<define_variables> | prev_refused_appr_group = prev_app[prev_app['NAME_CONTRACT_STATUS'].isin(['Approved', 'Refused'])].groupby(['SK_ID_CURR', 'NAME_CONTRACT_STATUS'])
prev_refused_appr_agg = prev_refused_appr_group['SK_ID_CURR'].count().unstack()
prev_refused_appr_agg.head(10) | Home Credit Default Risk |
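The count/unstack idiom above pivots long (client, status) pairs into one column per contract status; a toy illustration:

import pandas as pd

toy = pd.DataFrame({'SK_ID_CURR': [1, 1, 2],
                    'NAME_CONTRACT_STATUS': ['Approved', 'Refused', 'Approved']})
wide = toy.groupby(['SK_ID_CURR', 'NAME_CONTRACT_STATUS'])['SK_ID_CURR'].count().unstack()
print(wide)  # columns Approved/Refused; client 2 gets NaN under Refused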
17,910,828 | val_target = np.asarray([i[1] for i in list(val_ds.unbatch().as_numpy_iterator())])
print(val_target.shape)
val_target[:5]<predict_on_test> | prev_refused_appr_agg = prev_refused_appr_agg.fillna(0)
prev_refused_appr_agg.columns = ['PREV_APPROVED_COUNT', 'PREV_REFUSED_COUNT']
prev_refused_appr_agg = prev_refused_appr_agg.reset_index()
prev_refused_appr_agg.head(10) | Home Credit Default Risk |
17,910,828 | val_predict = bert_classifier.predict(val_ds)<predict_on_test> | prev_agg = prev_amt_agg.merge(prev_refused_appr_agg, on='SK_ID_CURR', how='left')
prev_agg['PREV_REFUSED_RATIO'] = prev_agg['PREV_REFUSED_COUNT']/prev_agg['PREV_SK_ID_CURR_COUNT']
prev_agg['PREV_APPROVED_RATIO'] = prev_agg['PREV_APPROVED_COUNT']/prev_agg['PREV_SK_ID_CURR_COUNT']
prev_agg = prev_agg.drop(['PREV_REFUSED_COUNT', 'PREV_APPROVED_COUNT'], axis=1)
prev_agg.head(30) | Home Credit Default Risk |
17,910,828 | predictions = bert_classifier.predict(test_ds)
print(predictions.shape)
print(predictions[:5])<prepare_output> | apps_all = get_apps_processed(apps) | Home Credit Default Risk |
17,910,828 | predictions = np.where(predictions > THRESHOLD, 1, 0)
df_predictions = pd.DataFrame(predictions)
df_predictions.columns = ['target']
print(df_predictions.shape)
df_predictions.head()<save_to_csv> | print(apps_all.shape, prev_agg.shape)
apps_all = apps_all.merge(prev_agg, on='SK_ID_CURR', how='left')
print(apps_all.shape) | Home Credit Default Risk |
17,910,828 | submission = pd.concat([df_test['id'], df_predictions], axis=1)
submission.to_csv('submission.csv', index=False)<load_from_csv> | object_columns = apps_all.dtypes[apps_all.dtypes == 'object'].index.tolist()
for column in object_columns:
    apps_all[column] = pd.factorize(apps_all[column])[0] | Home Credit Default Risk |
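pd.factorize maps each distinct category to an integer code, with missing values encoded as -1; a toy illustration of the encoding applied above:

import pandas as pd

codes, uniques = pd.factorize(pd.Series(['Cash', 'Revolving', 'Cash', None]))
print(codes)    # [ 0  1  0 -1]
print(uniques)  # Index(['Cash', 'Revolving'], dtype='object')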
17,910,828 | train_filepath = '/kaggle/input/nlp-getting-started/train.csv'
test_filepath = '/kaggle/input/nlp-getting-started/test.csv'
df_train = pd.read_csv(train_filepath)
df_test = pd.read_csv(test_filepath)
df_train.head()
<feature_engineering> | apps_all_train = apps_all[~apps_all['TARGET'].isnull()]
apps_all_test = apps_all[apps_all['TARGET'].isnull()]
apps_all_test = apps_all_test.drop('TARGET', axis=1) | Home Credit Default Risk |
17,910,828 | lemmatizer = WordNetLemmatizer()
for i in range(0, len(df_train)):
    text = re.sub('[^a-zA-Z]', ' ', df_train['text'][i])  # keep letters only
    text = text.lower()
    # note: the substitution above already removed ':' and '/', so this
    # URL pattern no longer matches anything; kept as in the original
    text = re.sub(r'^https?:\/\/.*[\r\n]*', '', text)
    text = text.split()
    text = [lemmatizer.lemmatize(word) for word in text if word not in stopwords.words('english')]
    text = ' '.join(text)
    df_train.loc[i, 'text'] = text  # .loc avoids chained-assignment warnings
df_train.head()<feature_engineering> | ftr_app = apps_all_train.drop(['SK_ID_CURR', 'TARGET'], axis=1)
target_app = apps_all_train['TARGET']
train_x, valid_x, train_y, valid_y = train_test_split(ftr_app, target_app, test_size=0.3, random_state=2020)
train_x.shape, valid_x.shape | Home Credit Default Risk |
17,910,828 | for i in range(0, len(df_test)):
    text = re.sub('[^a-zA-Z]', ' ', df_test['text'][i])  # keep letters only
    text = text.lower()
    # same caveat as the training loop: this URL strip is a no-op here
    text = re.sub(r'^https?:\/\/.*[\r\n]*', '', text)
    text = text.split()
    text = [lemmatizer.lemmatize(word) for word in text if word not in stopwords.words('english')]
    text = ' '.join(text)
    df_test.loc[i, 'text'] = text
df_test['text'][i] = text
df_test.head()<save_to_csv> | clf = LGBMClassifier(
n_jobs=-1,
n_estimators=1000,
learning_rate=0.02,
num_leaves=32,
subsample=0.8,
max_depth=12,
silent=-1,
verbose=-1
)
clf.fit(train_x, train_y, eval_set=[(train_x, train_y),(valid_x, valid_y)], eval_metric= 'auc', verbose= 100,
early_stopping_rounds= 50 ) | Home Credit Default Risk |
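A plausible follow-up to the fit above, not shown in these cells: score the test split and write a submission (the column and file names are assumptions based on the competition's sample format):

preds = clf.predict_proba(apps_all_test.drop('SK_ID_CURR', axis=1))[:, 1]
submission = apps_all_test[['SK_ID_CURR']].copy()
submission['TARGET'] = preds
submission.to_csv('prev_app_submission.csv', index=False)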