kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completion
stringlengths
1
182k
comp_name
stringlengths
5
57
8,206,048
# Inference setup for the Jane Street market-prediction PyTorch MLP ensemble.
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 100)

# Paths fixed: the original was garbled as '.. /input' (stray space).
DATA_PATH = '../input/jane-street-market-prediction/'
NFOLDS = 5          # number of saved fold checkpoints to ensemble
TRAIN = False
CACHE_PATH = '../input/mlp012003weights'


def save_pickle(dic, save_path):
    """Serialize *dic* to *save_path* with pickle."""
    with open(save_path, 'wb') as f:
        pickle.dump(dic, f)


def load_pickle(load_path):
    """Load and return a pickled object from *load_path*."""
    with open(load_path, 'rb') as f:
        return pickle.load(f)


feat_cols = [f'feature_{i}' for i in range(130)]
target_cols = ['action', 'action_1', 'action_2', 'action_3', 'action_4']
# Per-feature means cached offline; used to impute NaNs at inference time.
f_mean = np.load(f'{CACHE_PATH}/f_mean_online.npy')

# 130 raw features plus two hand-crafted cross features.
all_feat_cols = [col for col in feat_cols]
all_feat_cols.extend(['cross_41_42_43', 'cross_1_2'])


class Model(nn.Module):
    """Dense-skip MLP: each Linear layer consumes the two previous activations."""

    def __init__(self):
        super(Model, self).__init__()
        self.batch_norm0 = nn.BatchNorm1d(len(all_feat_cols))
        self.dropout0 = nn.Dropout(0.2)

        dropout_rate = 0.2
        hidden_size = 256
        self.dense1 = nn.Linear(len(all_feat_cols), hidden_size)
        self.batch_norm1 = nn.BatchNorm1d(hidden_size)
        self.dropout1 = nn.Dropout(dropout_rate)

        self.dense2 = nn.Linear(hidden_size + len(all_feat_cols), hidden_size)
        self.batch_norm2 = nn.BatchNorm1d(hidden_size)
        self.dropout2 = nn.Dropout(dropout_rate)

        self.dense3 = nn.Linear(hidden_size + hidden_size, hidden_size)
        self.batch_norm3 = nn.BatchNorm1d(hidden_size)
        self.dropout3 = nn.Dropout(dropout_rate)

        self.dense4 = nn.Linear(hidden_size + hidden_size, hidden_size)
        self.batch_norm4 = nn.BatchNorm1d(hidden_size)
        self.dropout4 = nn.Dropout(dropout_rate)

        self.dense5 = nn.Linear(hidden_size + hidden_size, len(target_cols))

        # Only LeakyReLU is used in forward(); the others are kept so saved
        # state_dicts (PReLU/RReLU carry parameters) keep loading cleanly.
        self.Relu = nn.ReLU(inplace=True)
        self.PReLU = nn.PReLU()
        self.LeakyReLU = nn.LeakyReLU(negative_slope=0.01, inplace=True)
        self.RReLU = nn.RReLU()

    def forward(self, x):
        x = self.batch_norm0(x)
        x = self.dropout0(x)

        x1 = self.dense1(x)
        x1 = self.batch_norm1(x1)
        x1 = self.LeakyReLU(x1)
        x1 = self.dropout1(x1)

        x = torch.cat([x, x1], 1)
        x2 = self.dense2(x)
        x2 = self.batch_norm2(x2)
        x2 = self.LeakyReLU(x2)
        x2 = self.dropout2(x2)

        x = torch.cat([x1, x2], 1)
        x3 = self.dense3(x)
        x3 = self.batch_norm3(x3)
        x3 = self.LeakyReLU(x3)
        x3 = self.dropout3(x3)

        x = torch.cat([x2, x3], 1)
        x4 = self.dense4(x)
        x4 = self.batch_norm4(x4)
        x4 = self.LeakyReLU(x4)
        x4 = self.dropout4(x4)

        x = torch.cat([x3, x4], 1)
        x = self.dense5(x)
        return x


if True:
    device = torch.device("cpu")
    model_list = []
    tmp = np.zeros(len(feat_cols))
    for _fold in range(NFOLDS):
        torch.cuda.empty_cache()  # harmless no-op when running on CPU
        model = Model()
        model.to(device)
        model_weights = f"{CACHE_PATH}/online_model{_fold}.pth"
        model.load_state_dict(torch.load(model_weights,
                                         map_location=torch.device('cpu')))
        model.eval()
        model_list.append(model)
model = get_model("xception", pretrained=False) model = nn.Sequential(*list(model.children())[:-1]) class Pooling(nn.Module): def __init__(self): super(Pooling, self ).__init__() self.p1 = nn.AdaptiveAvgPool2d(( 1,1)) self.p2 = nn.AdaptiveMaxPool2d(( 1,1)) def forward(self, x): x1 = self.p1(x) x2 = self.p2(x) return(x1+x2)* 0.5 model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(( 1,1))) class Head(torch.nn.Module): def __init__(self, in_f, out_f): super(Head, self ).__init__() self.f = nn.Flatten() self.l = nn.Linear(in_f, 512) self.d = nn.Dropout(0.5) self.o = nn.Linear(512, out_f) self.b1 = nn.BatchNorm1d(in_f) self.b2 = nn.BatchNorm1d(512) self.r = nn.ReLU() def forward(self, x): x = self.f(x) x = self.b1(x) x = self.d(x) x = self.l(x) x = self.r(x) x = self.b2(x) x = self.d(x) out = self.o(x) return out class FCN(torch.nn.Module): def __init__(self, base, in_f): super(FCN, self ).__init__() self.base = base self.h1 = Head(in_f, 1) def forward(self, x): x = self.base(x) return self.h1(x) net = [] model = FCN(model, 2048) model = model.cuda() model.load_state_dict(torch.load('.. /input/deepfake-kernel-data/model_50epochs_lr0001_patience5_factor01_batchsize32.pth')) net.append(model )
Deepfake Detection Challenge
8,206,048
SEED = 1111 np.random.seed(SEED) def create_mlp( num_columns, num_labels, hidden_units, dropout_rates, label_smoothing, learning_rate ): inp = tf.keras.layers.Input(shape=(num_columns,)) x = tf.keras.layers.BatchNormalization()(inp) x = tf.keras.layers.Dropout(dropout_rates[0] )(x) for i in range(len(hidden_units)) : x = tf.keras.layers.Dense(hidden_units[i] )(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Activation(tf.keras.activations.swish )(x) x = tf.keras.layers.Dropout(dropout_rates[i + 1] )(x) x = tf.keras.layers.Dense(num_labels )(x) out = tf.keras.layers.Activation("sigmoid" )(x) model = tf.keras.models.Model(inputs=inp, outputs=out) model.compile( optimizer=tfa.optimizers.RectifiedAdam(learning_rate=learning_rate), loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing), metrics=tf.keras.metrics.AUC(name="AUC"), ) return model epochs = 200 batch_size = 4096 hidden_units = [160, 160, 160] dropout_rates = [0.2, 0.2, 0.2, 0.2] label_smoothing = 1e-2 learning_rate = 1e-3 tf.keras.backend.clear_session() tf.random.set_seed(SEED) clf = create_mlp( len(feat_cols), 5, hidden_units, dropout_rates, label_smoothing, learning_rate ) clf.load_weights('.. /input/jane-street-with-keras-nn-overfit/model.h5') tf_models = [clf]<choose_model_class>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.5 def predict_on_video_set(videos, num_workers): def process_file(i): filename = videos[i] y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video) return y_pred with ThreadPoolExecutor(max_workers=num_workers)as ex: predictions = ex.map(process_file, range(len(videos))) return list(predictions) model.eval() predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
8,206,048
N_FEAT_TAGS = 29          # tag count; Emb_NN_Model appends a synthetic tag_29
DEVICE = device
N_FEATURES = 130
THREE_HIDDEN_LAYERS = [400, 400, 400]


class FFN(nn.Module):
    """BN0 -> 3 x (Linear -> BN -> nonlin -> dropout) -> optional head.

    outputCount=0 omits the output layer (trunk mode); isOpAct appends
    Sigmoid (1-2 outputs) or Softmax (>2 outputs).
    NOTE(review): the mutable/shared defaults (list, nn.SiLU()) are kept
    for interface compatibility; SiLU is stateless, so sharing is harmless.
    """

    def __init__(self, inputCount=130, outputCount=5,
                 hiddenLayerCounts=[150, 150, 150],
                 drop_prob=0.2, nonlin=nn.SiLU(), isOpAct=False):
        super(FFN, self).__init__()
        self.nonlin = nonlin
        self.dropout = nn.Dropout(drop_prob)
        self.batchnorm0 = nn.BatchNorm1d(inputCount)
        self.dense1 = nn.Linear(inputCount, hiddenLayerCounts[0])
        self.batchnorm1 = nn.BatchNorm1d(hiddenLayerCounts[0])
        self.dense2 = nn.Linear(hiddenLayerCounts[0], hiddenLayerCounts[1])
        self.batchnorm2 = nn.BatchNorm1d(hiddenLayerCounts[1])
        self.dense3 = nn.Linear(hiddenLayerCounts[1], hiddenLayerCounts[2])
        self.batchnorm3 = nn.BatchNorm1d(hiddenLayerCounts[2])
        self.outDense = None
        if outputCount > 0:
            self.outDense = nn.Linear(hiddenLayerCounts[-1], outputCount)
        self.outActivtn = None
        if isOpAct:
            if outputCount == 1 or outputCount == 2:
                self.outActivtn = nn.Sigmoid()
            elif outputCount > 0:
                self.outActivtn = nn.Softmax(dim=-1)

    def forward(self, X):
        X = self.batchnorm0(X)
        X = self.dropout(self.nonlin(self.batchnorm1(self.dense1(X))))
        X = self.dropout(self.nonlin(self.batchnorm2(self.dense2(X))))
        X = self.dropout(self.nonlin(self.batchnorm3(self.dense3(X))))
        if self.outDense:
            X = self.outDense(X)
        if self.outActivtn:
            X = self.outActivtn(X)
        return X


class Emb_NN_Model(nn.Module):
    """FFN over raw features concatenated with tag-embedding projections."""

    def __init__(self, three_hidden_layers=THREE_HIDDEN_LAYERS,
                 embed_dim=(N_FEAT_TAGS),
                 # Path fixed: original was garbled as '.. /input/...'.
                 csv_file='../input/jane-street-market-prediction/features.csv'):
        super(Emb_NN_Model, self).__init__()
        global N_FEAT_TAGS
        N_FEAT_TAGS = 29
        dtype = {'tag_0': 'int8'}
        for i in range(1, 29):
            dtype['tag_' + str(i)] = 'int8'
        t_df = pd.read_csv(csv_file, usecols=range(1, N_FEAT_TAGS + 1), dtype=dtype)
        # Synthetic tag_29: set only on the first row (feature_0).
        t_df['tag_29'] = np.array([1] + ([0] * (t_df.shape[0] - 1))).astype('int8')
        self.features_tag_matrix = torch.tensor(t_df.to_numpy())
        N_FEAT_TAGS += 1  # global side effect kept from the original code
        self.embed_dim = embed_dim
        self.tag_embedding = nn.Embedding(N_FEAT_TAGS + 1, embed_dim)
        self.tag_weights = nn.Linear(N_FEAT_TAGS, 1)
        drop_prob = 0.5
        self.ffn = FFN(inputCount=(130 + embed_dim), outputCount=0,
                       hiddenLayerCounts=[(three_hidden_layers[0] + embed_dim),
                                          (three_hidden_layers[1] + embed_dim),
                                          (three_hidden_layers[2] + embed_dim)],
                       drop_prob=drop_prob)
        self.outDense = nn.Linear(three_hidden_layers[2] + embed_dim, 5)

    def features2emb(self):
        """Mean embedding of each feature's active tags: shape (130, embed_dim)."""
        all_tag_idxs = torch.LongTensor(np.arange(N_FEAT_TAGS))
        tag_bools = self.features_tag_matrix
        f_emb = self.tag_embedding(all_tag_idxs).repeat(130, 1, 1)
        f_emb = f_emb * tag_bools[:, :, None]      # zero out inactive tags
        s = torch.sum(tag_bools, dim=1)
        f_emb = torch.sum(f_emb, dim=-2) / s[:, None]
        return f_emb

    def forward(self, cat_featrs, features):
        cat_featrs = None  # categorical inputs are accepted but unused
        features = features.view(-1, N_FEATURES)
        f_emb = self.features2emb()
        features_2 = torch.matmul(features, f_emb)
        features = torch.hstack((features, features_2))
        x = self.ffn(features)
        out_logits = self.outDense(x)
        return out_logits
!nvidia-smi
Deepfake Detection Challenge
8,206,048
<split><EOS>
submission_df_one = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df_one.to_csv("submission_xception.csv", index=False) fsub = 0.52*submission_df_resnext['label'] + 0.50*submission_df_one['label'] final = pd.DataFrame({'filename': test_videos, "label": fsub}) final.to_csv('submission.csv', index=False )
Deepfake Detection Challenge
8,138,725
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<concatenate>
!pip install.. /input/pytorchcv/pytorchcv-0.0.55-py2.py3-none-any.whl --quiet
Deepfake Detection Challenge
8,138,725
if True: for(test_df, pred_df)in tqdm(env_iter): if test_df['weight'].item() > 0: x_tt = test_df.loc[:, feat_cols].values if np.isnan(x_tt.sum()): x_tt = np.nan_to_num(x_tt)+ np.isnan(x_tt)* f_mean cross_41_42_43 = x_tt[:, 41] + x_tt[:, 42] + x_tt[:, 43] cross_1_2 = x_tt[:, 1] /(x_tt[:, 2] + 1e-5) feature_inp = np.concatenate(( x_tt, np.array(cross_41_42_43 ).reshape(x_tt.shape[0], 1), np.array(cross_1_2 ).reshape(x_tt.shape[0], 1), ), axis=1) torch_pred = np.zeros(( 1, len(target_cols))) for model in model_list: torch_pred += model(torch.tensor(feature_inp, dtype=torch.float ).to(device)).sigmoid().detach().cpu().numpy() / NFOLDS torch_pred = np.median(torch_pred) tf_pred = np.median(np.mean([model(x_tt, training = False ).numpy() for model in tf_models],axis=0)) x_tt = torch.tensor(x_tt ).float().view(-1, 130) embnn_p = np.median(torch.sigmoid(embNN_model(None, x_tt)).detach().cpu().numpy().reshape(( -1, 5)) , axis=1) pred_pr = torch_pred*0.42 + tf_pred*0.42 + embnn_p*0.16 pred_df.action = np.where(pred_pr >= 0.4978, 1, 0 ).astype(int) else: pred_df.action = 0 env.predict(pred_df )<train_model>
%matplotlib inline warnings.filterwarnings("ignore" )
Deepfake Detection Challenge
8,138,725
print("Done !" )<install_modules>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) len(test_videos )
Deepfake Detection Challenge
8,138,725
!pip --quiet install.. /input/treelite/treelite-0.93-py3-none-manylinux2010_x86_64.whl<install_modules>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
Deepfake Detection Challenge
8,138,725
!pip --quiet install.. /input/treelite/treelite_runtime-0.93-py3-none-manylinux2010_x86_64.whl<set_options>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,138,725
sns.set_context("talk") style.use('fivethirtyeight') pd.options.display.max_columns = None warnings.filterwarnings('ignore' )<import_modules>
frames_per_video = 20 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
8,138,725
class PurgedGroupTimeSeriesSplit(_BaseKFold):
    """Group-aware time-series CV with a purge gap between train and test.

    Groups (e.g. trading days) are kept in first-appearance order; each fold
    trains on groups strictly before the test window, with `group_gap` groups
    purged in between to limit leakage.
    """

    @_deprecate_positional_args
    def __init__(self, n_splits=5, *, max_train_group_size=np.inf,
                 max_test_group_size=np.inf, group_gap=None, verbose=False):
        super().__init__(n_splits, shuffle=False, random_state=None)
        self.max_train_group_size = max_train_group_size
        self.group_gap = group_gap
        self.max_test_group_size = max_test_group_size
        self.verbose = verbose

    def split(self, X, y=None, groups=None):
        """Yield (train_indices, test_indices) pairs; `groups` is required."""
        if groups is None:
            raise ValueError(
                "The 'groups' parameter should not be None")
        X, y, groups = indexable(X, y, groups)
        n_samples = _num_samples(X)
        n_splits = self.n_splits
        group_gap = self.group_gap
        max_test_group_size = self.max_test_group_size
        max_train_group_size = self.max_train_group_size
        n_folds = n_splits + 1
        group_dict = {}
        # Unique groups in order of first appearance (not sorted by value).
        u, ind = np.unique(groups, return_index=True)
        unique_groups = u[np.argsort(ind)]
        n_samples = _num_samples(X)
        n_groups = _num_samples(unique_groups)
        for idx in np.arange(n_samples):
            if groups[idx] in group_dict:
                group_dict[groups[idx]].append(idx)
            else:
                group_dict[groups[idx]] = [idx]
        if n_folds > n_groups:
            raise ValueError(
                ("Cannot have number of folds={0} greater than"
                 " the number of groups={1}").format(n_folds, n_groups))
        group_test_size = min(n_groups // n_folds, max_test_group_size)
        group_test_starts = range(n_groups - n_splits * group_test_size,
                                  n_groups, group_test_size)
        for group_test_start in group_test_starts:
            train_array = []
            test_array = []
            group_st = max(0, group_test_start - group_gap - max_train_group_size)
            for train_group_idx in unique_groups[group_st:(group_test_start - group_gap)]:
                train_array_tmp = group_dict[train_group_idx]
                train_array = np.sort(np.unique(
                    np.concatenate((train_array, train_array_tmp)),
                    axis=None), axis=None)
            train_end = train_array.size
            for test_group_idx in unique_groups[group_test_start:
                                                group_test_start + group_test_size]:
                test_array_tmp = group_dict[test_group_idx]
                test_array = np.sort(np.unique(
                    np.concatenate((test_array, test_array_tmp)),
                    axis=None), axis=None)
            # Purge the first `group_gap` test samples adjacent to training.
            test_array = test_array[group_gap:]
            if self.verbose > 0:
                pass
            yield [int(i) for i in train_array], [int(i) for i in test_array]
input_size = 150
Deepfake Detection Challenge
8,138,725
SEED = 42 START_DATE = 85 INPUT_DIR = '.. /input/janestreet-save-as-feather/' TRADING_THRESHOLD = 0.502<load_from_disk>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
8,138,725
%%time def load_data(input_dir=INPUT_DIR): train = pd.read_feather(pathlib.Path(input_dir + 'train.feather')) return train train = load_data(INPUT_DIR )<filter>
model = get_model("xception", pretrained=False) model = nn.Sequential(*list(model.children())[:-1]) model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(1)) class Head(torch.nn.Module): def __init__(self, in_f, out_f): super(Head, self ).__init__() self.f = nn.Flatten() self.l = nn.Linear(in_f, 512) self.d = nn.Dropout(0.5) self.o = nn.Linear(512, out_f) self.b1 = nn.BatchNorm1d(in_f) self.b2 = nn.BatchNorm1d(512) self.r = nn.ReLU() def forward(self, x): x = self.f(x) x = self.b1(x) x = self.d(x) x = self.l(x) x = self.r(x) x = self.b2(x) x = self.d(x) out = self.o(x) return out class FCN(torch.nn.Module): def __init__(self, base, in_f): super(FCN, self ).__init__() self.base = base self.h1 = Head(in_f, 1) def forward(self, x): x = self.base(x) return self.h1(x) net = [] model = FCN(model, 2048) model = model.cuda() model.load_state_dict(torch.load('.. /input/deepfake-xception3/xception2.pth')) net.append(model )
Deepfake Detection Challenge
8,138,725
train = train.query(f'date > {START_DATE}') train.fillna(train.mean() ,inplace=True) train = train[train['weight'] != 0]<define_variables>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.5
Deepfake Detection Challenge
8,138,725
features = train.columns[train.columns.str.startswith('feature')].values.tolist() print('{} features used'.format(len(features)) )<feature_engineering>
def predict_on_video_set(videos, num_workers): def process_file(i): filename = videos[i] y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video) return y_pred with ThreadPoolExecutor(max_workers=num_workers)as ex: predictions = ex.map(process_file, range(len(videos))) return list(predictions )
Deepfake Detection Challenge
8,138,725
train['action'] =(train['resp'] > 0 ).astype('int') f_mean = np.mean(train[features[1:]].values,axis=0 )<init_hyperparams>
speed_test = False
Deepfake Detection Challenge
8,138,725
params = {'n_estimators': 473, 'max_depth': 7, 'min_child_weight': 6, 'learning_rate': 0.015944928866056352, 'subsample': 0.608128483148888, 'gamma': 0, 'colsample_bytree': 0.643875232059528,'objective':'binary:logistic', 'eval_metric': 'auc','tree_method': 'gpu_hist', 'random_state': 42,} params_1 = {'n_estimators': 494, 'max_depth': 8, 'min_child_weight': 6, 'learning_rate': 0.009624384025871735, 'subsample': 0.8328412036014541, 'gamma': 0, 'colsample_bytree': 0.715303237773365, 'objective':'binary:logistic', 'eval_metric': 'auc','tree_method': 'gpu_hist', 'random_state': 42,}<split>
if speed_test: start_time = time.time() speedtest_videos = test_videos[:5] predictions = predict_on_video_set(speedtest_videos, num_workers=4) elapsed = time.time() - start_time print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) )
Deepfake Detection Challenge
8,138,725
training = True if training: resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4'] X = train[features].values y = np.stack([(train[c] > 0 ).astype('int')for c in resp_cols] ).T groups = train['date'].values models = [] scores = [] cv = PurgedGroupTimeSeriesSplit( n_splits=4, group_gap=20, ) for t in tqdm(range(y.shape[1])) : yy = y[:,t] for i,(train_index, valid_index)in enumerate(cv.split( X, yy, groups=groups)) : print(f'Target {t} Fold {i} started at {time.ctime() }') X_train, X_valid = X[train_index], X[valid_index] y_train, y_valid = yy[train_index], yy[valid_index] model = xgb.XGBClassifier(**params_1, n_jobs = -1) model.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], eval_metric='auc', verbose=100, callbacks = [xgb.callback.EarlyStopping(rounds=300,save_best=True)]) pred = model.predict(X_valid) score = roc_auc_score(y_valid,pred) model.save_model(f'my_model_{t}_{i}.model') models.append(model) scores.append(score) del score, model print(scores) del X_train, X_valid, y_train, y_valid rubbish = gc.collect()<load_pretrained>
%%time model.eval() predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
8,138,725
<load_pretrained><EOS>
submission_df = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df.to_csv("submission.csv", index=False )
Deepfake Detection Challenge
8,127,756
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<predict_on_test>
from IPython.display import Image
Deepfake Detection Challenge
8,127,756
if training: predictor_0 = treelite_runtime.Predictor(f'./mymodel_{0}.so', verbose=True) predictor_1 = treelite_runtime.Predictor(f'./mymodel_{1}.so', verbose=True) predictor_2 = treelite_runtime.Predictor(f'./mymodel_{2}.so', verbose=True) predictor_3 = treelite_runtime.Predictor(f'./mymodel_{3}.so', verbose=True) predictor_4 = treelite_runtime.Predictor(f'./mymodel_{4}.so', verbose=True )<define_variables>
Image('.. /input/deepfake-kernel-data/google_cloud_compute_engine_launch_vm.png' )
Deepfake Detection Challenge
8,127,756
env = janestreet.make_env() iter_test = env.iter_test()<categorify>
Image('.. /input/deepfake-kernel-data/google_cloud_vm.png' )
Deepfake Detection Challenge
8,127,756
f = np.median index_features = [n for n in range(1,(len(features)+ 1)) ] for(test_df, pred_df)in tqdm(iter_test): if test_df['weight'].item() > 0: x_tt = test_df.values[0][index_features].reshape(1,-1) if np.isnan(x_tt[:, 1:].sum()): x_tt[:, 1:] = np.nan_to_num(x_tt[:, 1:])+ np.isnan(x_tt[:, 1:])* f_mean batch = treelite_runtime.Batch.from_npy2d(x_tt) pred_0 = predictor_0.predict(batch) pred_1 = predictor_1.predict(batch) pred_2 = predictor_2.predict(batch) pred_3 = predictor_3.predict(batch) pred_4 = predictor_4.predict(batch) pred = np.stack([pred_0,pred_1,pred_2,pred_3,pred_4],axis=0 ).T pred = f(pred) pred_df.action = int(pred >= TRADING_THRESHOLD) else: pred_df['action'].values[0] = 0 env.predict(pred_df )<import_modules>
Image('.. /input/deepfake-kernel-data/lr_15e-2_epochs_42_patience_5.png' )
Deepfake Detection Challenge
8,127,756
import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.impute import SimpleImputer from sklearn.preprocessing import OrdinalEncoder from sklearn.preprocessing import OneHotEncoder from sklearn.metrics import accuracy_score from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import KFold from xgboost import XGBRegressor from sklearn.pipeline import Pipeline from sklearn.compose import ColumnTransformer from sklearn.model_selection import RandomizedSearchCV from sklearn.metrics import mean_absolute_error from sklearn.preprocessing import StandardScaler from skopt import BayesSearchCV from skopt.space import Real, Categorical, Integer import time<load_from_csv>
Image('.. /input/deepfake-kernel-data/lr_2e-3_epochs_10_patience_5.png' )
Deepfake Detection Challenge
8,127,756
X = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/train.csv', index_col='Id') X_test = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/test.csv', index_col='Id') X<prepare_x_and_y>
Image('.. /input/deepfake-kernel-data/lr_2e-3_epochs_20_patience_5.png' )
Deepfake Detection Challenge
8,127,756
y = X.SalePrice X.drop(columns=['SalePrice'], axis=1, inplace=True )<count_unique_values>
Image('.. /input/deepfake-kernel-data/lr_4e-3_epochs_12_patience_2.png' )
Deepfake Detection Challenge
8,127,756
X.isnull().sum() , X.nunique() , X.dtypes<define_variables>
Image('.. /input/deepfake-kernel-data/lr_4e-3_epochs_30_patience_2.png' )
Deepfake Detection Challenge
8,127,756
cat_cols = [cname for cname in X.columns if X[cname].dtype == "object"] num_cols = [cname for cname in X.columns if X[cname].dtype in ['int64', 'float64']]<categorify>
Image('.. /input/deepfake-kernel-data/google_cloud_vm_deepfake_training_screenshot.png' )
Deepfake Detection Challenge
8,127,756
num_transf = Pipeline(steps=[ ('imputer', SimpleImputer()), ('sca', StandardScaler()) ]) cat_transf = Pipeline(steps=[ ('imputer', SimpleImputer(strategy='most_frequent')) , ('onehot', OneHotEncoder(handle_unknown='ignore')) ]) preprocessor = ColumnTransformer( transformers=[ ('num', num_transf, num_cols), ('cat', cat_transf, cat_cols) ]) model = XGBRegressor(tree_method='gpu_hist', use_label_encoder=False, eval_metric='rmse', random_state=7) pipe = Pipeline(steps=[('preprocessor', preprocessor), ('model', model) ] )<init_hyperparams>
%matplotlib inline warnings.filterwarnings("ignore" )
Deepfake Detection Challenge
8,127,756
params = {'model__n_estimators':np.arange(100, 800, 10),'model__learning_rate':np.arange(0.001, 0.2, 0.001), 'model__max_depth':np.arange(3, 12, 1)} kfold = KFold(n_splits=7, shuffle=True, random_state=7) scan = RandomizedSearchCV(pipe, params, n_iter=100, scoring='neg_root_mean_squared_error', cv=kfold, random_state=7) result = scan.fit(X, y) print("Best: %f using %s" %(-1*result.best_score_, result.best_params_)) <find_best_params>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) frame_h = 5 frame_l = 5 len(test_videos )
Deepfake Detection Challenge
8,127,756
best_parameters = dict(result.best_params_.copy()) for k in best_parameters.keys() : best_parameters[k.replace("model__","")] = best_parameters.pop(k) best_parameters['reg_alpha'] = best_parameters.pop('model__reg_alpha') best_parameters['n_estimators'] = best_parameters.pop('model__n_estimators') best_parameters<save_to_csv>
print("PyTorch version:", torch.__version__) print("CUDA version:", torch.version.cuda) print("cuDNN version:", torch.backends.cudnn.version() )
Deepfake Detection Challenge
8,127,756
model_scan = XGBRegressor(**best_parameters, eval_metric='rmse', use_label_encoder=False, tree_method='gpu_hist') pipe_scan = Pipeline(steps=[('preprocessing', preprocessor),('model', model_scan)]) pipe_scan.fit(X, y) pred_scan = pipe_scan.predict(X_test) output = pd.DataFrame({'Id': X_test.index, 'SalePrice': pred_scan}) output.to_csv('submission.csv', index=False) output<import_modules>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
8,127,756
import pandas as pd from tqdm import tqdm<load_from_csv>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,127,756
train = pd.read_csv('.. /input/ames-housing-dataset/AmesHousing.csv') train.drop(['PID'], axis=1, inplace=True) origin = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/train.csv') train.columns = origin.columns test = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/test.csv') submission = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/sample_submission.csv') print('Train:{} Test:{}'.format(train.shape,test.shape))<drop_column>
frames_per_video = 64 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
8,127,756
missing = test.isnull().sum() missing = missing[missing>0] train.drop(missing.index, axis=1, inplace=True) train.drop(['Electrical'], axis=1, inplace=True) test.dropna(axis=1, inplace=True) test.drop(['Electrical'], axis=1, inplace=True )<feature_engineering>
input_size = 224
Deepfake Detection Challenge
8,127,756
l_test = tqdm(range(0, len(test)) , desc='Matching') for i in l_test: for j in range(0, len(train)) : for k in range(1, len(test.columns)) : if test.iloc[i,k] == train.iloc[j,k]: continue else: break else: submission.iloc[i, 1] = train.iloc[j, -1] break l_test.close()<save_to_csv>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
8,127,756
submission.to_csv('submission.csv', index=False )<save_to_csv>
class MyResNeXt(models.resnet.ResNet): def __init__(self, training=True): super(MyResNeXt, self ).__init__(block=models.resnet.Bottleneck, layers=[3, 4, 6, 3], groups=32, width_per_group=4) self.fc = nn.Linear(2048, 1 )
Deepfake Detection Challenge
8,127,756
submission.to_csv('submission.csv', index=False )<set_options>
checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu) model = MyResNeXt().to(gpu) model.load_state_dict(checkpoint) _ = model.eval() del checkpoint
Deepfake Detection Challenge
8,127,756
sns.set() sns.set_style('darkgrid') def ignore_warn(*args, **kwargs): pass warnings.warn = ignore_warn<load_from_csv>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.5
Deepfake Detection Challenge
8,127,756
training_data = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/train.csv" )<load_from_csv>
def predict_on_video_set(videos, num_workers): def process_file(i): filename = videos[i] y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video) return y_pred with ThreadPoolExecutor(max_workers=num_workers)as ex: predictions = ex.map(process_file, range(len(videos))) return list(predictions )
Deepfake Detection Challenge
8,127,756
testing_data = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/test.csv' )<train_model>
speed_test = False
Deepfake Detection Challenge
8,127,756
print("Training data shape: ",training_data.shape) print("Testing data shape: ",testing_data.shape )<define_variables>
if speed_test: start_time = time.time() speedtest_videos = test_videos[:5] predictions = predict_on_video_set(speedtest_videos, num_workers=4) elapsed = time.time() - start_time print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) )
Deepfake Detection Challenge
8,127,756
test_id = testing_data["Id"]<drop_column>
predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
8,127,756
training_data.drop("Id", axis = 1, inplace = True) testing_data.drop("Id", axis = 1, inplace = True )<train_model>
submission_df_resnext = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df_resnext.to_csv("submission_resnext.csv", index=False )
Deepfake Detection Challenge
8,127,756
print("Training data shape: ",training_data.shape) print("Testing data shape: ",testing_data.shape )<feature_engineering>
!pip install.. /input/deepfake-xception-trained-model/pytorchcv-0.0.55-py2.py3-none-any.whl --quiet
Deepfake Detection Challenge
8,127,756
training_data["SalePrice"] = np.log1p(training_data["SalePrice"] )<prepare_x_and_y>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) len(test_videos )
Deepfake Detection Challenge
8,127,756
n_train = training_data.shape[0] n_test = testing_data.shape[0] y_train = training_data.SalePrice.iloc[1:] all_data = pd.concat(( training_data,testing_data)).reset_index(drop = True) all_data.drop(['SalePrice'],axis = 1, inplace = True) print('All Data size: {}'.format(all_data.shape))<create_dataframe>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
8,127,756
all_data_na =(all_data.isnull().sum() / len(all_data)) * 100 all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index ).sort_values(ascending=False)[:30] missing_data = pd.DataFrame({"Missing Ratio" : all_data_na}) missing_data.head(30 )<data_type_conversions>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,127,756
all_data["PoolQC"] = all_data["PoolQC"].fillna("None" )<data_type_conversions>
frames_per_video = 64 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
8,127,756
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None" )<data_type_conversions>
input_size = 150
Deepfake Detection Challenge
8,127,756
all_data["Alley"] = all_data["Alley"].fillna("None" )<data_type_conversions>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
8,127,756
all_data["Fence"] = all_data["Fence"].fillna("None" )<data_type_conversions>
model = get_model("xception", pretrained=False) model = nn.Sequential(*list(model.children())[:-1]) class Pooling(nn.Module): def __init__(self): super(Pooling, self ).__init__() self.p1 = nn.AdaptiveAvgPool2d(( 1,1)) self.p2 = nn.AdaptiveMaxPool2d(( 1,1)) def forward(self, x): x1 = self.p1(x) x2 = self.p2(x) return(x1+x2)* 0.5 model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(( 1,1))) class Head(torch.nn.Module): def __init__(self, in_f, out_f): super(Head, self ).__init__() self.f = nn.Flatten() self.l = nn.Linear(in_f, 512) self.d = nn.Dropout(0.5) self.o = nn.Linear(512, out_f) self.b1 = nn.BatchNorm1d(in_f) self.b2 = nn.BatchNorm1d(512) self.r = nn.ReLU() def forward(self, x): x = self.f(x) x = self.b1(x) x = self.d(x) x = self.l(x) x = self.r(x) x = self.b2(x) x = self.d(x) out = self.o(x) return out class FCN(torch.nn.Module): def __init__(self, base, in_f): super(FCN, self ).__init__() self.base = base self.h1 = Head(in_f, 1) def forward(self, x): x = self.base(x) return self.h1(x) net = [] model = FCN(model, 2048) model = model.cuda() model.load_state_dict(torch.load('.. /input/deepfake-kernel-data/model_50epochs_lr0001_patience5_factor01_batchsize32.pth')) net.append(model )
Deepfake Detection Challenge
8,127,756
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None" )<categorify>
def predict_on_video(video_path, batch_size):
    """Score one video and return its mean fake probability in [0, 1].

    Extracts the best face per sampled frame, batches up to ``batch_size``
    face crops, and averages the sigmoid outputs of the global ``model``.
    Returns the neutral prior 0.5 on any error OR when no usable face is
    found, so downstream averaging/blending never sees ``None``.
    """
    try:
        faces = face_extractor.process_video(video_path)
        # Keep only the highest-confidence face per frame to reduce noise.
        face_extractor.keep_only_best_face(faces)
        if len(faces) > 0:
            x = np.zeros((batch_size, input_size, input_size, 3), dtype=np.uint8)
            n = 0
            for frame_data in faces:
                for face in frame_data["faces"]:
                    resized_face = isotropically_resize_image(face, input_size)
                    resized_face = make_square_image(resized_face)
                    if n < batch_size:
                        x[n] = resized_face
                        n += 1
                    else:
                        print("WARNING: have %d faces but batch size is %d" % (n, batch_size))
            if n > 0:
                x = torch.tensor(x, device=gpu).float()
                # NHWC -> NCHW for the conv net.
                x = x.permute((0, 3, 1, 2))
                for i in range(len(x)):
                    # In-place per-image normalization after scaling to [0, 1].
                    x[i] = normalize_transform(x[i] / 255.)
                with torch.no_grad():
                    y_pred = model(x)
                    y_pred = torch.sigmoid(y_pred.squeeze())
                    # Only the first n rows hold real faces; the rest are zeros.
                    return y_pred[:n].mean().item()
    except Exception as e:
        print("Prediction error on video %s: %s" % (video_path, str(e)))
    # BUG FIX: the original returned None implicitly when no faces were
    # detected (or n == 0); fall back to the 0.5 prior in every non-scoring
    # path so the submission blend arithmetic cannot crash on None.
    return 0.5
Deepfake Detection Challenge
8,127,756
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform( lambda x: x.fillna(x.median()))<data_type_conversions>
def predict_on_video_set(videos, num_workers):
    """Score every filename in *videos*, returning predictions in input order.

    Per-video work is fanned out over a thread pool; ``ThreadPoolExecutor.map``
    preserves the ordering of its inputs.
    """
    def process_file(idx):
        # Resolve a bare filename to its full path and score that one video.
        full_path = os.path.join(test_dir, videos[idx])
        return predict_on_video(full_path, batch_size=frames_per_video)

    with ThreadPoolExecutor(max_workers=num_workers) as ex:
        return list(ex.map(process_file, range(len(videos))))
Deepfake Detection Challenge
8,127,756
col =("GarageType" , "GarageFinish" , "GarageQual" ,"GarageCond") for c in col: all_data[c] = all_data[c].fillna("None" )<data_type_conversions>
speed_test = False
Deepfake Detection Challenge
8,127,756
col2 =("GarageYrBlt" , "GarageArea" , "GarageCars") for c2 in col2: all_data[c2] = all_data[c2].fillna(0 )<data_type_conversions>
# Optional timing dry-run on a 5-video subset before processing the full set.
if speed_test:
    start_time = time.time()
    speedtest_videos = test_videos[:5]
    predictions = predict_on_video_set(speedtest_videos, num_workers=4)
    elapsed = time.time() - start_time
    print("Elapsed %f sec.Average per video: %f sec." % (elapsed, elapsed / len(speedtest_videos)))
Deepfake Detection Challenge
8,127,756
col3 =("BsmtFinSF1" , "BsmtFinSF2" , "BsmtUnfSF", "TotalBsmtSF" , "BsmtFullBath" , "BsmtHalfBath") for c3 in col3: all_data[c3] = all_data[c3].fillna(0 )<data_type_conversions>
%%time model.eval() predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
8,127,756
col4 =("BsmtQual" , "BsmtCond", "BsmtExposure" , "BsmtFinType1" , "BsmtFinType2") for c4 in col4: all_data[c4] = all_data[c4].fillna("None" )<data_type_conversions>
submission_df_xception = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df_xception.to_csv("submission_xception.csv", index=False )
Deepfake Detection Challenge
8,127,756
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None") all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0 )<data_type_conversions>
# Blend the two single-model submissions into the final label column.
# NOTE(review): the blend weights 0.51 + 0.5 sum to 1.01, not 1.0 — this may
# be a deliberate upward bias of the predictions, but confirm it is intended.
submission_df = pd.DataFrame({"filename": test_videos})
submission_df["label"] = 0.51*submission_df_resnext["label"] + 0.5*submission_df_xception["label"]
Deepfake Detection Challenge
8,127,756
<drop_column><EOS>
submission_df.to_csv("submission.csv", index=False )
Deepfake Detection Challenge
8,044,856
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<data_type_conversions>
%matplotlib inline warnings.filterwarnings("ignore" )
Deepfake Detection Challenge
8,044,856
all_data["Functional"] = all_data['Functional'].fillna('Typ' )<data_type_conversions>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) frame_h = 5 frame_l = 5 len(test_videos )
Deepfake Detection Challenge
8,044,856
all_data["Electrical"] = all_data['Electrical'].fillna(all_data['Electrical'].mode() [0] )<data_type_conversions>
print("PyTorch version:", torch.__version__) print("CUDA version:", torch.version.cuda) print("cuDNN version:", torch.backends.cudnn.version() )
Deepfake Detection Challenge
8,044,856
all_data["KitchenQual"] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode() [0] )<categorify>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
8,044,856
all_data["Exterior1st"] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode() [0]) all_data["Exterior2nd"] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode() [0] )<data_type_conversions>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,044,856
all_data["SaleType"] = all_data['SaleType'].fillna(all_data['SaleType'].mode() [0] )<data_type_conversions>
frames_per_video = 64 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
8,044,856
all_data["MSSubClass"] = all_data['MSSubClass'].fillna("None" )<create_dataframe>
input_size = 224
Deepfake Detection Challenge
8,044,856
all_data_na =(all_data.isnull().sum() / len(all_data)) * 100 all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index ).sort_values(ascending = False) missing_data = pd.DataFrame({"Missing Ratio" : all_data_na}) missing_data.head(30 )<feature_engineering>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
8,044,856
all_data["MSSubClass"] = all_data["MSSubClass"].apply(str )<data_type_conversions>
class MyResNeXt(models.resnet.ResNet):
    """ResNeXt50_32x4d (layers [3,4,6,3], groups=32, width 4) re-headed
    with a single-logit binary classifier.

    Attribute names follow torchvision's ResNet so the released checkpoint's
    state_dict keys line up; ``fc`` in particular must keep its name.
    """

    def __init__(self, training=True):
        super(MyResNeXt, self).__init__(
            block=models.resnet.Bottleneck,
            layers=[3, 4, 6, 3],
            groups=32,
            width_per_group=4,
        )
        # Swap the 1000-class ImageNet head for one real/fake logit.
        self.fc = nn.Linear(2048, 1)
Deepfake Detection Challenge
8,044,856
all_data["OverallCond"] = all_data["OverallCond"].astype(str )<data_type_conversions>
# Load the fine-tuned ResNeXt weights onto the target device and switch the
# model to inference mode; drop the state-dict afterwards to free memory.
checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu)
model = MyResNeXt().to(gpu)
model.load_state_dict(checkpoint)
_ = model.eval()
del checkpoint
Deepfake Detection Challenge
8,044,856
all_data["YrSold"] = all_data["YrSold"].astype(str) all_data["MoSold"] = all_data["MoSold"].astype(str )<import_modules>
def predict_on_video(video_path, batch_size):
    """Score one video and return its mean fake probability in [0, 1].

    Extracts the best face per sampled frame, batches up to ``batch_size``
    face crops, and averages the sigmoid outputs of the global ``model``.
    Returns the neutral prior 0.5 on any error OR when no usable face is
    found, so downstream averaging/blending never sees ``None``.
    """
    try:
        faces = face_extractor.process_video(video_path)
        # Keep only the highest-confidence face per frame to reduce noise.
        face_extractor.keep_only_best_face(faces)
        if len(faces) > 0:
            x = np.zeros((batch_size, input_size, input_size, 3), dtype=np.uint8)
            n = 0
            for frame_data in faces:
                for face in frame_data["faces"]:
                    resized_face = isotropically_resize_image(face, input_size)
                    resized_face = make_square_image(resized_face)
                    if n < batch_size:
                        x[n] = resized_face
                        n += 1
                    else:
                        print("WARNING: have %d faces but batch size is %d" % (n, batch_size))
            if n > 0:
                x = torch.tensor(x, device=gpu).float()
                # NHWC -> NCHW for the conv net.
                x = x.permute((0, 3, 1, 2))
                for i in range(len(x)):
                    # In-place per-image normalization after scaling to [0, 1].
                    x[i] = normalize_transform(x[i] / 255.)
                with torch.no_grad():
                    y_pred = model(x)
                    y_pred = torch.sigmoid(y_pred.squeeze())
                    # Only the first n rows hold real faces; the rest are zeros.
                    return y_pred[:n].mean().item()
    except Exception as e:
        print("Prediction error on video %s: %s" % (video_path, str(e)))
    # BUG FIX: the original returned None implicitly when no faces were
    # detected (or n == 0); fall back to the 0.5 prior in every non-scoring
    # path so the submission blend arithmetic cannot crash on None.
    return 0.5
Deepfake Detection Challenge
8,044,856
from sklearn.preprocessing import LabelEncoder<define_variables>
def predict_on_video_set(videos, num_workers):
    """Score every filename in *videos*, returning predictions in input order.

    Per-video work is fanned out over a thread pool; ``ThreadPoolExecutor.map``
    preserves the ordering of its inputs.
    """
    def process_file(idx):
        # Resolve a bare filename to its full path and score that one video.
        full_path = os.path.join(test_dir, videos[idx])
        return predict_on_video(full_path, batch_size=frames_per_video)

    with ThreadPoolExecutor(max_workers=num_workers) as ex:
        return list(ex.map(process_file, range(len(videos))))
Deepfake Detection Challenge
8,044,856
cols =('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond', 'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1', 'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope', 'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond', 'YrSold', 'MoSold' )<categorify>
speed_test = False
Deepfake Detection Challenge
8,044,856
for c in cols: lbl = LabelEncoder() lbl.fit(list(all_data[c].values)) all_data[c] = lbl.transform(list(all_data[c].values))<feature_engineering>
if speed_test: start_time = time.time() speedtest_videos = test_videos[:5] predictions = predict_on_video_set(speedtest_videos, num_workers=4) elapsed = time.time() - start_time print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) )
Deepfake Detection Challenge
8,044,856
all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']<define_variables>
predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
8,044,856
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index<feature_engineering>
submission_df_resnext = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df_resnext.to_csv("submission_resnext.csv", index=False )
Deepfake Detection Challenge
8,044,856
skewness = skewness[abs(skewness)> 0.75] print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0])) skewed_features = skewness.index lam = 0.15 for feat in skewed_features: all_data[feat] = boxcox1p(all_data[feat], lam )<categorify>
!pip install.. /input/deepfake-xception-trained-model/pytorchcv-0.0.55-py2.py3-none-any.whl --quiet
Deepfake Detection Challenge
8,044,856
all_data = pd.get_dummies(all_data) print(all_data.shape )<split>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) len(test_videos )
Deepfake Detection Challenge
8,044,856
train = all_data[1:n_train] test = all_data[n_train:]<import_modules>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
Deepfake Detection Challenge
8,044,856
from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.model_selection import KFold, cross_val_score, train_test_split import xgboost as xgb import lightgbm as lgb from sklearn.metrics import mean_squared_error<train_model>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,044,856
regressor = RandomForestRegressor(n_estimators = 100, random_state = 0) regressor.fit(train, y_train )<predict_on_test>
frames_per_video = 64 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
8,044,856
Y_pred = regressor.predict(test) Y_pred<train_model>
input_size = 150
Deepfake Detection Challenge
8,044,856
regressor = DecisionTreeRegressor(random_state = 0) regressor.fit(train, y_train )<predict_on_test>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
8,044,856
y_pred = regressor.predict(test) print("Predicted price: ", y_pred )<create_dataframe>
# Build a headless Xception backbone and attach a custom binary classifier.
# NOTE: attribute names (f, l, d, o, b1, b2, base, h1) are part of the saved
# state_dict's keys and must not be renamed.
model = get_model("xception", pretrained=False)
model = nn.Sequential(*list(model.children())[:-1])  # drop original classifier


class Pooling(nn.Module):
    """Mean of adaptive average- and max-pooling, both reduced to 1x1."""

    def __init__(self):
        super(Pooling, self).__init__()
        self.p1 = nn.AdaptiveAvgPool2d((1, 1))
        self.p2 = nn.AdaptiveMaxPool2d((1, 1))

    def forward(self, x):
        return (self.p1(x) + self.p2(x)) * 0.5


# Replace the backbone's final pooling with plain adaptive average pooling.
model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))


class Head(torch.nn.Module):
    """Classifier head: flatten -> BN -> dropout -> FC(512) -> ReLU -> BN -> dropout -> FC(out_f)."""

    def __init__(self, in_f, out_f):
        super(Head, self).__init__()
        self.f = nn.Flatten()
        self.l = nn.Linear(in_f, 512)
        self.d = nn.Dropout(0.5)
        self.o = nn.Linear(512, out_f)
        self.b1 = nn.BatchNorm1d(in_f)
        self.b2 = nn.BatchNorm1d(512)
        self.r = nn.ReLU()

    def forward(self, x):
        x = self.f(x)
        x = self.b1(x)
        x = self.d(x)
        x = self.l(x)
        x = self.r(x)
        x = self.b2(x)
        x = self.d(x)
        return self.o(x)


class FCN(torch.nn.Module):
    """Backbone plus a single one-logit Head."""

    def __init__(self, base, in_f):
        super(FCN, self).__init__()
        self.base = base
        self.h1 = Head(in_f, 1)

    def forward(self, x):
        return self.h1(self.base(x))


net = []
model = FCN(model, 2048)
model = model.cuda()
model.load_state_dict(torch.load('.. /input/deepfake-xception-trained-model/model.pth'))
net.append(model)
Deepfake Detection Challenge
8,044,856
ft = pd.DataFrame({"Actual" : y_train,"predict" : y_pred}) ft<import_modules>
def predict_on_video(video_path, batch_size):
    """Score one video and return its mean fake probability in [0, 1].

    Extracts the best face per sampled frame, batches up to ``batch_size``
    face crops, and averages the sigmoid outputs of the global ``model``.
    Returns the neutral prior 0.5 on any error OR when no usable face is
    found, so downstream averaging/blending never sees ``None``.
    """
    try:
        faces = face_extractor.process_video(video_path)
        # Keep only the highest-confidence face per frame to reduce noise.
        face_extractor.keep_only_best_face(faces)
        if len(faces) > 0:
            x = np.zeros((batch_size, input_size, input_size, 3), dtype=np.uint8)
            n = 0
            for frame_data in faces:
                for face in frame_data["faces"]:
                    resized_face = isotropically_resize_image(face, input_size)
                    resized_face = make_square_image(resized_face)
                    if n < batch_size:
                        x[n] = resized_face
                        n += 1
                    else:
                        print("WARNING: have %d faces but batch size is %d" % (n, batch_size))
            if n > 0:
                x = torch.tensor(x, device=gpu).float()
                # NHWC -> NCHW for the conv net.
                x = x.permute((0, 3, 1, 2))
                for i in range(len(x)):
                    # In-place per-image normalization after scaling to [0, 1].
                    x[i] = normalize_transform(x[i] / 255.)
                with torch.no_grad():
                    y_pred = model(x)
                    y_pred = torch.sigmoid(y_pred.squeeze())
                    # Only the first n rows hold real faces; the rest are zeros.
                    return y_pred[:n].mean().item()
    except Exception as e:
        print("Prediction error on video %s: %s" % (video_path, str(e)))
    # BUG FIX: the original returned None implicitly when no faces were
    # detected (or n == 0); fall back to the 0.5 prior in every non-scoring
    # path so the submission blend arithmetic cannot crash on None.
    return 0.5
Deepfake Detection Challenge
8,044,856
from xgboost import XGBRegressor<train_model>
def predict_on_video_set(videos, num_workers):
    """Score every filename in *videos*, returning predictions in input order.

    Per-video work is fanned out over a thread pool; ``ThreadPoolExecutor.map``
    preserves the ordering of its inputs.
    """
    def process_file(idx):
        # Resolve a bare filename to its full path and score that one video.
        full_path = os.path.join(test_dir, videos[idx])
        return predict_on_video(full_path, batch_size=frames_per_video)

    with ThreadPoolExecutor(max_workers=num_workers) as ex:
        return list(ex.map(process_file, range(len(videos))))
Deepfake Detection Challenge
8,044,856
xgb_clf = XGBRegressor(n_estimators=1000, learning_rate=0.05) xgb_clf.fit(train, y_train )<compute_train_metric>
speed_test = False
Deepfake Detection Challenge
8,044,856
xgb_clf_cv = cross_val_score(xgb_clf,train, y_train, cv=10) print(xgb_clf_cv.mean() )<train_model>
if speed_test: start_time = time.time() speedtest_videos = test_videos[:5] predictions = predict_on_video_set(speedtest_videos, num_workers=4) elapsed = time.time() - start_time print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) )
Deepfake Detection Challenge
8,044,856
xgb_clf = XGBRegressor(n_estimators=1000, learning_rate=0.05) xgb_clf.fit(train, y_train )<predict_on_test>
%%time model.eval() predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
8,044,856
xgb_predictions_test = xgb_clf.predict(test) <create_dataframe>
submission_df_xception = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df_xception.to_csv("submission_xception.csv", index=False )
Deepfake Detection Challenge
8,044,856
ft1 = pd.DataFrame({"Actual" : y_train,"predict" : xgb_predictions_test} )<import_modules>
submission_df = pd.DataFrame({"filename": test_videos} )
Deepfake Detection Challenge
8,044,856
from sklearn.metrics import mean_squared_error<import_modules>
# Public-LB log-loss of the two single-model submissions.
r1 = 0.46441
r2 = 0.52189
# Normalize so the derived blend weights r11 + r22 sum to exactly 1.
total = r1 + r2
r11, r22 = r1 / total, r2 / total
Deepfake Detection Challenge
8,044,856
from sklearn.metrics import mean_squared_error<compute_test_metric>
submission_df["label"] = r22*submission_df_resnext["label"] + r11*submission_df_xception["label"]
Deepfake Detection Challenge
8,044,856
<choose_model_class><EOS>
submission_df.to_csv("submission.csv", index=False )
Deepfake Detection Challenge
7,930,779
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<create_dataframe>
%matplotlib inline
Deepfake Detection Challenge
7,930,779
ft2 = pd.DataFrame({"Actual" : y_train,"predict" : lgb_pred} )<create_dataframe>
# Frames-per-video sweep against public-leaderboard score, for visualization.
frames_per_vid = [17, 25, 30, 32, 35, 36, 40]
public_LB = [0.46788, 0.46776, 0.46611, 0.46542, 0.46643, 0.46484, 0.46635]
df_viz = pd.DataFrame(list(zip(frames_per_vid, public_LB)),
                      columns=['frames_per_vid', 'public_LB'])
Deepfake Detection Challenge
7,930,779
submission = pd.DataFrame({"Id" : test_id, "SalePrice" : lgb_pred} )<save_to_csv>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) frame_h = 5 frame_l = 5 len(test_videos )
Deepfake Detection Challenge
7,930,779
submission.to_csv("sub.csv",index = False )<import_modules>
print("PyTorch version:", torch.__version__) print("CUDA version:", torch.version.cuda) print("cuDNN version:", torch.backends.cudnn.version() )
Deepfake Detection Challenge
7,930,779
import numpy as np import pandas as pd<load_from_csv>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
7,930,779
train= pd.read_csv(".. /input/ames-housing-dataset/AmesHousing.csv") train.shape<drop_column>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge