kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
8,546,370
columns = [col for col in train.columns.to_list() if col not in ['id','target']]<prepare_x_and_y>
print("PyTorch version:", torch.__version__) print("CUDA version:", torch.version.cuda) print("cuDNN version:", torch.backends.cudnn.version() )
Deepfake Detection Challenge
8,546,370
data=train[columns] target=train['target']<split>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
8,546,370
def objective(trial,data=data,target=target): train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.15,random_state=42) param = { 'tree_method':'gpu_hist', 'lambda': trial.suggest_loguniform('lambda', 1e-3, 10.0), 'alpha': trial.suggest_loguniform('alpha', 1e-3, 10.0), 'colsample_bytree': trial.suggest_categorical('colsample_bytree', [0.3,0.4,0.5,0.6,0.7,0.8,0.9, 1.0]), 'subsample': trial.suggest_categorical('subsample', [0.4,0.5,0.6,0.7,0.8,1.0]), 'learning_rate': trial.suggest_categorical('learning_rate', [0.008,0.009,0.01,0.012,0.014,0.016,0.018, 0.02]), 'n_estimators': 4000, 'max_depth': trial.suggest_categorical('max_depth', [5,7,9,11,13,15,17,20]), 'random_state': trial.suggest_categorical('random_state', [24, 48,2020]), 'min_child_weight': trial.suggest_int('min_child_weight', 1, 300), } model = xgb.XGBRegressor(**param) model.fit(train_x,train_y,eval_set=[(test_x,test_y)],early_stopping_rounds=100,verbose=False) preds = model.predict(test_x) rmse = mean_squared_error(test_y, preds,squared=False) return rmse<init_hyperparams>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,546,370
Best_trial= {'lambda': 0.0042687338951820425, 'alpha': 6.2637008222060935, 'colsample_bytree': 0.4, 'subsample': 0.6, 'n_estimators': 4000, 'learning_rate': 0.01, 'max_depth': 11, 'random_state': 2020, 'min_child_weight': 171, 'tree_method':'gpu_hist' }<train_model>
frames_per_video = 64 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
8,546,370
preds = np.zeros(test.shape[0]) kf = KFold(n_splits=5,random_state=48,shuffle=True) rmse=[] models = [] n=0 for trn_idx, test_idx in kf.split(train[columns],train['target']): X_tr,X_val=train[columns].iloc[trn_idx],train[columns].iloc[test_idx] y_tr,y_val=train['target'].iloc[trn_idx],train['target'].iloc[test_idx] model = xgb.XGBRegressor(**Best_trial) model.fit(X_tr,y_tr,eval_set=[(X_val,y_val)],early_stopping_rounds=100,verbose=False) preds+=model.predict(test[columns])/kf.n_splits rmse.append(mean_squared_error(y_val, model.predict(X_val), squared=False)) models.append(model) print(n+1,rmse[n]) n+=1<save_to_csv>
input_size = 224
Deepfake Detection Challenge
8,546,370
sub['target']=preds sub.to_csv('submission.csv', index=False )<import_modules>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
8,546,370
import matplotlib.pyplot as plt<load_from_csv>
class MyResNeXt(models.resnet.ResNet): def __init__(self, training=True): super(MyResNeXt, self ).__init__(block=models.resnet.Bottleneck, layers=[3, 4, 6, 3], groups=32, width_per_group=4) self.fc = nn.Linear(2048, 1 )
Deepfake Detection Challenge
8,546,370
train_df = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/train.csv", index_col=["id"]) test_df = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/test.csv", index_col=["id"]) X = train_df.iloc[:, :-1].to_numpy() y = train_df.iloc[:, -1].to_numpy() X_test = test_df.to_numpy()<train_model>
checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu) model = MyResNeXt().to(gpu) model.load_state_dict(checkpoint) _ = model.eval() del checkpoint
Deepfake Detection Challenge
8,546,370
def train(model): X_train, X_test, y_train, y_test = train_test_split(X, y.flatten() , test_size=0.1, random_state=156) y_train = y_train.reshape(-1, 1) y_test = y_test.reshape(-1, 1) model = model.fit(X_train, y_train, early_stopping_rounds=100, verbose=False, eval_set=[(X_test, y_test)]) score = mean_squared_error(model.predict(X_train), y_train, squared=False) print(score) return model<compute_train_metric>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.481
Deepfake Detection Challenge
8,546,370
def objectiveXGB(trial: Trial, X, y, test): param = { "n_estimators" : trial.suggest_int('n_estimators', 500, 4000), 'max_depth':trial.suggest_int('max_depth', 8, 16), 'min_child_weight':trial.suggest_int('min_child_weight', 1, 300), 'gamma':trial.suggest_int('gamma', 1, 3), 'learning_rate': 0.01, 'colsample_bytree':trial.suggest_discrete_uniform('colsample_bytree',0.5, 1, 0.1), 'nthread' : -1, 'tree_method': 'gpu_hist', 'predictor': 'gpu_predictor', 'lambda': trial.suggest_loguniform('lambda', 1e-3, 10.0), 'alpha': trial.suggest_loguniform('alpha', 1e-3, 10.0), 'subsample': trial.suggest_categorical('subsample', [0.6,0.7,0.8,1.0]), 'random_state': 42 } X_train, X_test, y_train, y_test = train_test_split(X, y.flatten() , test_size=0.1) y_train = y_train.reshape(-1, 1) y_test = y_test.reshape(-1, 1) model = xgb.XGBRegressor(**param) xgb_model = model.fit(X_train, y_train, verbose=False, eval_set=[(X_test, y_test)]) score = mean_squared_error(xgb_model.predict(X_test), y_test, squared=False) return score<train_on_grid>
def predict_on_video_set(videos, num_workers): def process_file(i): filename = videos[i] y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video) return y_pred with ThreadPoolExecutor(max_workers=num_workers)as ex: predictions = ex.map(process_file, range(len(videos))) return list(predictions )
Deepfake Detection Challenge
8,546,370
study = optuna.create_study(direction='minimize',sampler=TPESampler()) study.optimize(lambda trial : objectiveXGB(trial, X, y, X_test), n_trials=50) print('Best trial: score {}, params {}'.format(study.best_trial.value,study.best_trial.params)) best_param = study.best_trial.params xgbReg = train(xgb.XGBRegressor(**best_param, tree_method='gpu_hist', random_state=42, predictor='gpu_predictor', learning_rate=0.01, nthread=-1)) <compute_train_metric>
predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
8,546,370
def objectiveLGBM(trial: Trial, X, y, test): param = { 'objective': 'regression', 'metric': 'root_mean_squared_error', 'verbosity': -1, 'boosting_type': 'gbdt', 'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0), 'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0), 'num_leaves': trial.suggest_int('num_leaves', 2, 512), 'learning_rate': 0.01, 'n_estimators': trial.suggest_int('n_estimators', 700, 3000), 'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0), 'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0), 'bagging_freq': trial.suggest_int('bagging_freq', 1, 7), 'min_child_samples': trial.suggest_int('min_child_samples', 5, 100), 'device':"gpu", 'gpu_use_dp':True } X_train, X_test, y_train, y_test = train_test_split(X, y.flatten() , test_size=0.1) lgbm_regr = LGBMRegressor(**param) lgbm_regr = lgbm_regr.fit(X_train, y_train, verbose=False) score = mean_squared_error(lgbm_regr.predict(X_test), y_test, squared=False) return score<train_on_grid>
submission_df_resnext = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df_resnext.to_csv("submission_resnext.csv", index=False )
Deepfake Detection Challenge
8,546,370
study = optuna.create_study(direction='minimize',sampler=TPESampler()) study.optimize(lambda trial : objectiveLGBM(trial, X, y, X_test), n_trials=20) print('Best trial: score {}, params {}'.format(study.best_trial.value,study.best_trial.params)) best_param2 = study.best_trial.params lgbm = LGBMRegressor(**best_param2, device="gpu",gpu_use_dp=True, objective='regression', metric='root_mean_squared_error', learning_rate= 0.01, boosting_type='gbdt') <train_on_grid>
!pip install.. /input/deepfake-xception-trained-model/pytorchcv-0.0.55-py2.py3-none-any.whl --quiet
Deepfake Detection Challenge
8,546,370
final_model = xgb.XGBRegressor(n_estimators= 2000, max_depth= 16,tree_method='gpu_hist', predictor='gpu_predictor') sgd = SGDRegressor(max_iter=1000) hgb = HistGradientBoostingRegressor(max_depth=3, min_samples_leaf=1) cat = CatBoostRegressor(task_type="GPU", verbose=False) estimators = [ lgbm, cat, sgd, hgb, xgbReg ] S_train, S_test = stacking(estimators, X, y, X_test, regression=True, metric=mean_squared_error, n_folds=5, shuffle=False, random_state=0, verbose=2) final_model.fit(S_train, y) print(mean_squared_error(final_model.predict(S_train), y, squared=False))<save_to_csv>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) len(test_videos )
Deepfake Detection Challenge
8,546,370
submission = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/test.csv", index_col=["id"]) y_hat = final_model.predict(S_test) submission["target"] = y_hat submission[["target"]].to_csv("/kaggle/working/submission_stacking.csv") joblib.dump(final_model, '/kaggle/working/skacking.pkl' )<save_to_csv>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
8,546,370
submission = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/test.csv", index_col=["id"]) lgbm = LGBMRegressor(**best_param2, device="gpu",gpu_use_dp=True, objective='regression', learning_rate= 0.01, metric='root_mean_squared_error', boosting_type='gbdt') lgbm = lgbm.fit(X, y, verbose=False) y_hat = lgbm.predict(submission.to_numpy()) print(mean_squared_error(lgbm.predict(X), y, squared=False)) submission["target"] = y_hat submission[["target"]].to_csv("/kaggle/working/submission_lgbm.csv") joblib.dump(lgbm, '/kaggle/working/lgbm.pkl' )<save_to_csv>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,546,370
submission = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/test.csv", index_col=["id"]) params = {'n_estimators': 3520, 'max_depth': 11, 'min_child_weight': 231, 'gamma': 2, 'colsample_bytree': 0.7, 'lambda': 0.014950936465569798, 'alpha': 0.28520156840812494, 'subsample': 0.6} xgbReg = train(xgb.XGBRegressor(**params, tree_method='gpu_hist', random_state=42, predictor='gpu_predictor', learning_rate=0.01, nthread=-1)) y_hat = xgbReg.predict(submission.to_numpy()) print(mean_squared_error(xgbReg.predict(X), y, squared=False)) submission["target"] = y_hat submission[["target"]].to_csv("/kaggle/working/submission_xgb.csv") joblib.dump(xgbReg, '/kaggle/working/xgb_reg.pkl' )<set_options>
frames_per_video = 64 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
8,546,370
sns.set(font_scale=1.4) warnings.filterwarnings("ignore") <compute_test_metric>
input_size = 150
Deepfake Detection Challenge
8,546,370
def root_mean_squared_error(y_true, y_pred): return K.sqrt(K.mean(K.square(y_pred - y_true)) )<categorify>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
8,546,370
def add_pca(train_df, test_df, cols, n_comp=20, fit_test = True, prefix='pca_', fit_test_first=False): pca = PCA(n_components=n_comp, random_state=42) pca_titles = [prefix+'_pca_'+str(x)for x in range(n_comp)] temp_train = train_df.copy() temp_test = test_df.copy() for c in cols: fv = temp_train[c].mean() temp_train[c] = temp_train[c].fillna(value=fv) fv = temp_test[c].mean() temp_test[c] = temp_test[c].fillna(value=fv) for p in pca_titles: train_df[p] = 0.0 test_df[p] = 0.0 if fit_test==True: pca_data = pd.concat([temp_train[cols], temp_test[cols]], axis=0) if fit_test_first==True: pca.fit(pca_data[len(train_df):]) pca_data = pca.transform(pca_data) else: pca_data = pca.fit_transform(pca_data) train_df.loc[:, pca_titles] = pca_data[0:len(train_df)] test_df.loc[:, pca_titles] = pca_data[len(train_df):] return train_df, test_df, pca_titles def split_distributions(train_df, test_df, cols, target, n_comp=20, prefix='gm', add_labels=False): gm = GaussianMixture(n_components=n_comp,covariance_type='full', tol=0.001, reg_covar=1e-06, max_iter=100, n_init=1, init_params='kmeans', weights_init=None, means_init=None, precisions_init=None, random_state=42, warm_start=False, verbose=0, verbose_interval=10) temp_train = train_df.copy() temp_test = test_df.copy() gm_data = pd.concat([temp_train[[cols]], temp_test[[cols]]], axis=0 ).reset_index(drop=True) gm.fit(gm_data) gm_data['labels'] = gm.predict(gm_data) if add_labels: gm_titles = [prefix+str(x)for x in range(n_comp)] train_df[gm_titles]=0 test_df[gm_titles]=0 dummies = pd.get_dummies(gm_data['labels'], prefix=prefix) train_df.loc[:, gm_titles] = dummies[0:len(train_df)].values test_df.loc[:, gm_titles] = dummies[len(train_df):].values else: gm_titles = [] train_df[prefix+'_label'] = gm.labels_[0:len(train_df)] test_df[prefix+'_label'] = gm.labels_[len(train_df):] means = train_df.groupby([prefix+'_label'])[target].mean() train_df['menc_'+prefix+'_label'] = train_df[prefix+'_label'].map(means) 
test_df['menc_'+prefix+'_label'] = test_df[prefix+'_label'].map(means) gm_titles+=[prefix+'_label', 'menc_'+prefix+'_label'] return train_df, test_df, gm_titles def assign_fold(df, label_column,fold_column, NFOLDS=5): skf = StratifiedKFold(n_splits=NFOLDS) df[fold_column]=0 f=0 for trn_idx, val_idx in skf.split(df, df[label_column]): df.loc[val_idx, 'fold']=f f+=1 df[fold_column].value_counts() return df def st_scale(train_df, test_df, cols): StSc = StandardScaler() train_df[cols] = StSc.fit_transform(train_df[cols]) test_df[cols] = StSc.transform(test_df[cols]) return train_df, test_df def seed_everything(seed=1234): random.seed(seed) np.random.seed(seed) tf.random.set_seed(seed) os.environ['PYTHONHASHSEED'] = str(seed )<load_from_csv>
model = get_model("xception", pretrained=False) model = nn.Sequential(*list(model.children())[:-1]) class Pooling(nn.Module): def __init__(self): super(Pooling, self ).__init__() self.p1 = nn.AdaptiveAvgPool2d(( 1,1)) self.p2 = nn.AdaptiveMaxPool2d(( 1,1)) def forward(self, x): x1 = self.p1(x) x2 = self.p2(x) return(x1+x2)* 0.5 model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(( 1,1))) class Head(torch.nn.Module): def __init__(self, in_f, out_f): super(Head, self ).__init__() self.f = nn.Flatten() self.l = nn.Linear(in_f, 512) self.d = nn.Dropout(0.5) self.o = nn.Linear(512, out_f) self.b1 = nn.BatchNorm1d(in_f) self.b2 = nn.BatchNorm1d(512) self.r = nn.ReLU() def forward(self, x): x = self.f(x) x = self.b1(x) x = self.d(x) x = self.l(x) x = self.r(x) x = self.b2(x) x = self.d(x) out = self.o(x) return out class FCN(torch.nn.Module): def __init__(self, base, in_f): super(FCN, self ).__init__() self.base = base self.h1 = Head(in_f, 1) def forward(self, x): x = self.base(x) return self.h1(x) net = [] model = FCN(model, 2048) model = model.cuda() model.load_state_dict(torch.load('.. /input/deepfake-kernel-data/model_50epochs_lr0001_patience5_factor01_batchsize32.pth')) net.append(model )
Deepfake Detection Challenge
8,546,370
PATH = '/kaggle/input/tabular-playground-series-jan-2021/' train = pd.read_csv(PATH+'train.csv') test = pd.read_csv(PATH+'test.csv') submission = pd.read_csv(PATH+'sample_submission.csv') FT_COLS = [x for x in train.columns if 'cont' in x] TARGET='target' print(train.shape) train.head(10 )<define_variables>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.481
Deepfake Detection Challenge
8,546,370
original_feature_dict = {'cont1': 5, 'cont2': 10, 'cont3': 8, 'cont4': 8, 'cont5': 5, 'cont6': 6, 'cont7': 4, 'cont8': 3, 'cont9': 7, 'cont10':3, 'cont11': 4, 'cont12': 2, 'cont13': 3, 'cont14': 5,} pca_dict = { 'extra_pca_0': 4, 'extra_pca_1': 1, 'extra_pca_2': 1, 'extra_pca_3': 2, 'extra_pca_4': 1, 'extra_pca_5': 1 } MULTIPLIER = 3.0 original_feature_dict = {a:int(b*MULTIPLIER)for(a,b)in original_feature_dict.items() } ft_dict = {**original_feature_dict, **pca_dict} ft_dict<split>
def predict_on_video_set(videos, num_workers): def process_file(i): filename = videos[i] y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video) return y_pred with ThreadPoolExecutor(max_workers=num_workers)as ex: predictions = ex.map(process_file, range(len(videos))) return list(predictions )
Deepfake Detection Challenge
8,546,370
mixture_title_cols=[] for f in FT_COLS+pca_titles2: train, test, titles = split_distributions(train, test,f,TARGET, n_comp=ft_dict[f], prefix=f+'_dim', add_labels=True) mixture_title_cols+=titles mixture_value_cols = [] for count,f in enumerate(FT_COLS+pca_titles2): for d in range(ft_dict[f]): t_median = train[f][train[f+'_dim'+str(d)]==1].median() t_std = train[f][train[f+'_dim'+str(d)]==1].std() train[f+'_dim'+str(d)+'_dist'] = np.where(train[f+'_dim'+str(d)]==1, (train[f] - t_median)/t_std, np.nan) test[f+'_dim'+str(d)+'_dist'] = np.where(test[f+'_dim'+str(d)]==1, (test[f] - t_median)/t_std, np.nan) mixture_value_cols+=[f+'_dim'+str(d)+'_dist']<define_variables>
speed_test = False
Deepfake Detection Challenge
8,546,370
print('Total Original Features', len(FT_COLS)) print('Total Submixtures Labels', len(mixture_title_cols)) print('Total Submixtures Values', len(mixture_value_cols)) print('Total Feature Columns', len(FT_COLS)+len(mixture_title_cols)+len(mixture_value_cols))<data_type_conversions>
%%time model.eval() predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
8,546,370
NAN_VALUE = 0.0 for d in mixture_value_cols: train[d] = train[d].fillna(value=NAN_VALUE) test[d] = test[d].fillna(value=NAN_VALUE )<normalization>
submission_df_xception = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df_xception.to_csv("submission_xception.csv", index=False )
Deepfake Detection Challenge
8,546,370
SCALE = True if SCALE: train, test = st_scale(train, test, FT_COLS) for f in FT_COLS: train[f] = np.clip(train[f], -2, 2) test[f] = np.clip(test[f], -2, 2) SCALE_DISTS = True if SCALE_DISTS: for d in mixture_value_cols: TEMP_MAX = np.abs(train[d] ).max() train[d] = train[d] / TEMP_MAX test[d] = test[d] / TEMP_MAX<feature_engineering>
submission_df = pd.DataFrame({"filename": test_videos}) submission_df["label"] = 0.53*submission_df_resnext["label"] + 0.49*submission_df_xception["label"]
Deepfake Detection Challenge
8,546,370
<compute_train_metric><EOS>
submission_df.to_csv("submission.csv", index=False )
Deepfake Detection Challenge
8,526,543
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<train_model>
from IPython.display import Image
Deepfake Detection Challenge
8,526,543
def run_training(model, train_df, test_df,sample_submission, fold_col, orig_features, mixture_val_cols,mixture_label_cols, target_col, benchmark, outlier_col=None, nn=False, epochs=10,batch_size=32, verbose=False, dense=70, dout=0.15, dense_reg = 0.000001,act='elu'): FOLD_VALUES = sorted([x for x in train_df[fold_col].unique() ]) oof = np.zeros(( len(train_df),)) test_predictions = np.zeros(( len(sample_submission),)) fig,axes=plt.subplots(figsize=(15,6)) axes.set_ylim(0.69,0.75) RANDOM_SEEDS = [0,42,100,1000] for rs in RANDOM_SEEDS: seed_everything(seed=rs) for fold in FOLD_VALUES: print(' --- ') print('running random seed', rs, 'fold', fold) if outlier_col: trn_idx =(train_df[fold_col]!=fold)&(~train_df[outlier_col]) else: trn_idx = train_df[fold_col]!=fold val_idx = train_df[fold_col]==fold X_train_orig = train_df.loc[trn_idx,orig_features].values X_train = train_df.loc[trn_idx,mixture_val_cols].values X_train_mask = train_df.loc[trn_idx,mixture_label_cols].values y_train = train_df.loc[trn_idx, target_col].values X_val_orig = train_df.loc[val_idx,orig_features].values X_val = train_df.loc[val_idx,mixture_val_cols].values X_val_mask = train_df.loc[val_idx,mixture_label_cols].values y_val = train_df.loc[val_idx, target_col].values model = keras_model(orig_features, mixture_val_cols,mixture_label_cols,dout=dout, dense=dense,act=act, dense_reg = dense_reg, descend_fraction = 0.9) K.clear_session() back_count = 5 epoch_list=list(range(epochs-back_count, epochs)) class CustomModelCheckpoint(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs=None): if epoch in epoch_list: self.model.save_weights('model_fold_'+str(fold)+'_epoch_'+str(epoch)+'.h5', overwrite=True) else: pass cbk = CustomModelCheckpoint() history = model.fit([X_train_orig, X_train,X_train_mask ], y_train, epochs=epochs, batch_size=batch_size,shuffle=True, validation_data=([X_val_orig, X_val,X_val_mask], y_val), verbose=verbose, callbacks=[cbk]) print('Fold Last Epoch Train Error', 
history.history['root_mean_squared_error'][-1]) print('Fold Last Epoch Valid Error', history.history['val_root_mean_squared_error'][-1]) model.save_weights('model_final_'+str(fold)+'.h5') sns.lineplot(x=range(epochs), y=history.history['loss'], color='Blue') sns.lineplot(x=range(epochs), y=history.history['val_loss'], color='Red') val_preds = np.zeros(( len(X_val),)) for e in epoch_list: model = keras_model(orig_features, mixture_val_cols,mixture_label_cols,dout=dout, dense=dense,act=act, dense_reg = dense_reg, descend_fraction = 0.9) model.load_weights('model_fold_'+str(fold)+'_epoch_'+str(e)+'.h5') val_preds += model.predict([X_val_orig, X_val,X_val_mask] ).mean(axis=1) test_predictions += model.predict([test_df[orig_features].values,test_df[mixture_val_cols].values, test_df[mixture_label_cols].values] ).mean(axis=1) val_preds = val_preds / len(epoch_list) oof[val_idx]+=val_preds val_error = np.sqrt(mse(y_val, val_preds)) print('Fold multi epoch weight prediction error', val_error) oof = oof / len(RANDOM_SEEDS) total_val_error = np.sqrt(mse(train_df[target_col], oof)) print('final OOF MSE', total_val_error) fold_errors = [] for fold in train_df[fold_col].unique() : val_idx = train_df[fold_col]==fold fold_errors+=[np.sqrt(mse(train_df.loc[val_idx, target_col].values, oof[val_idx])) ] test_predictions = test_predictions/(NFOLDS * len(RANDOM_SEEDS)* len(epoch_list)) return oof, test_predictions, fold_errors<choose_model_class>
Image('.. /input/deepfake-kernel-data/google_cloud_compute_engine_launch_vm.png' )
Deepfake Detection Challenge
8,526,543
def keras_model(ft_orig, mixture_values, mixture_labels, n_layer=3,bnorm=True,dout=0.2, dense=20,act='elu', dense_reg = 0.000001, descend_fraction = 0.9): input1 = L.Input(shape=(len(ft_orig)) , name='input_orig') input1_do = L.Dropout(0.1 )(input1) XA = L.Dense(dense, activation=act, activity_regularizer=tf.keras.regularizers.L2(dense_reg), name='dense_orig' )(input1_do) if bnorm: XA = L.BatchNormalization(name='bn_1' )(XA) XA1 = L.Dropout(dout, name='do_orig' )(XA) input2 = L.Input(shape=(len(mixture_values)) , name='input_new') input3 = L.Input(shape=(len(mixture_labels)) , name='input_new2') all_input_combo = tf.keras.layers.Concatenate(axis=1, name='concat_i2' )([input2, input3]) input2_do = L.Dropout(0.2 )(all_input_combo) XB = L.Dense(dense, activation=act, activity_regularizer=tf.keras.regularizers.L2(dense_reg), name='dense_new' )(input2_do) if bnorm: XB = L.BatchNormalization(name='bn_2' )(XB) XB1 = L.Dropout(dout, name='do_new' )(XB) all_input_combo = tf.keras.layers.Concatenate(axis=1, name='concat_new' )([XA1, XB1]) X = L.Dense(int(dense), activation=act, activity_regularizer=tf.keras.regularizers.L2(dense_reg))(all_input_combo) if bnorm: X = L.BatchNormalization(name='bn_3' )(X) X2 = L.Dropout(dout, name='do_3' )(X) X2 = tf.keras.layers.concatenate([all_input_combo, X2], axis=1) X = L.Dense(int(( descend_fraction**2)*dense), activation=act, activity_regularizer=tf.keras.regularizers.L2(dense_reg))(X2) if bnorm: X = L.BatchNormalization()(X) X3 = L.Dropout(dout )(X) X3 = tf.keras.layers.concatenate([X2, X3], axis=1) X = L.Dense(int(( descend_fraction**3)*dense), activation=act, activity_regularizer=tf.keras.regularizers.L2(dense_reg *2))(X3) if bnorm: X = L.BatchNormalization()(X) X4 = L.Dropout(dout )(X) X4 = tf.keras.layers.concatenate([X3, X4],axis=1) X = L.Dense(int(( descend_fraction**4)*dense), activation=act, activity_regularizer=tf.keras.regularizers.L2(dense_reg*4))(X4) if bnorm: X = L.BatchNormalization()(X) X5 = L.Dropout(dout )(X) X5 = 
tf.keras.layers.concatenate([X4, X5], axis=1) X = L.Dense(5, activation=act, activity_regularizer=tf.keras.regularizers.L2(0.0001))(X5) output1 = L.Dense(1, activation='linear' )(X) model = tf.keras.Model(inputs=[input1, input2, input3], outputs=output1) model.compile(loss=root_mean_squared_error, optimizer=tfa.optimizers.AdamW(learning_rate=0.000809028893821181, weight_decay=9.83479875802558E-06), metrics=tf.keras.metrics.RootMeanSquaredError()) return model K.clear_session() model = keras_model(FT_COLS, mixture_title_cols, mixture_value_cols, dense=70, dout=0.15, dense_reg = 0.000001, act='elu', descend_fraction = 0.9) model.summary()<train_model>
Image('.. /input/deepfake-kernel-data/google_cloud_vm.png' )
Deepfake Detection Challenge
8,526,543
oof, test_predictions, fold_errors = run_training(model, train, test,submission, 'fold', FT_COLS, mixture_value_cols, mixture_title_cols, TARGET, cv_bm, outlier_col='outlier_filter', epochs=25, batch_size=256, verbose=False, dense=70, dout=0.15, dense_reg = 0.000001, act='elu', )<save_to_csv>
Image('.. /input/deepfake-kernel-data/lr_15e-2_epochs_42_patience_5.png' )
Deepfake Detection Challenge
8,526,543
print('Save Out of Fold Predictions') oof = pd.DataFrame(columns=['oof_prediction'], index=train['id'], data=oof + TARGET_MEAN) oof.to_csv('oof_predictions.csv', index=True) oof.head(10 )<compute_test_metric>
Image('.. /input/deepfake-kernel-data/lr_2e-3_epochs_10_patience_5.png' )
Deepfake Detection Challenge
8,526,543
print('fold errors', fold_errors) print('fold error std', np.array(fold_errors ).std() )<save_to_csv>
Image('.. /input/deepfake-kernel-data/lr_2e-3_epochs_20_patience_5.png' )
Deepfake Detection Challenge
8,526,543
submission['target'] = test_predictions + TARGET_MEAN submission.to_csv('submission.csv', index=False) submission.head(5 )<import_modules>
Image('.. /input/deepfake-kernel-data/lr_4e-3_epochs_12_patience_2.png' )
Deepfake Detection Challenge
8,526,543
import numpy as np import pandas as pd from math import sqrt from xgboost import XGBRegressor from lightgbm import LGBMRegressor from sklearn.impute import SimpleImputer from sklearn.ensemble import VotingRegressor from sklearn.feature_selection import VarianceThreshold from sklearn.metrics import make_scorer, mean_squared_error from sklearn.model_selection import GridSearchCV, ParameterGrid, train_test_split from sklearn.pipeline import Pipeline from sklearn.preprocessing import FunctionTransformer, StandardScaler<define_variables>
Image('.. /input/deepfake-kernel-data/lr_4e-3_epochs_30_patience_2.png' )
Deepfake Detection Challenge
8,526,543
RANDOM_STATE = 2021 CROSS_VALIDATION = 3<define_variables>
Image('.. /input/deepfake-kernel-data/google_cloud_vm_deepfake_training_screenshot.png' )
Deepfake Detection Challenge
8,526,543
data_dir = '/kaggle/input/tabular-playground-series-jan-2021'<load_from_csv>
%matplotlib inline warnings.filterwarnings("ignore" )
Deepfake Detection Challenge
8,526,543
df = pd.read_csv(f"{data_dir}/train.csv" ).set_index('id' ).convert_dtypes() display(df.shape) df.head(2 )<split>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) frame_h = 5 frame_l = 5 len(test_videos )
Deepfake Detection Challenge
8,526,543
X = df.copy() y = X.pop('target') X_train, X_valid, y_train, y_valid = train_test_split( X, y, train_size=0.8, test_size=0.2, random_state=RANDOM_STATE, )<define_variables>
print("PyTorch version:", torch.__version__) print("CUDA version:", torch.version.cuda) print("cuDNN version:", torch.backends.cudnn.version() )
Deepfake Detection Challenge
8,526,543
preprocessor = Pipeline(steps=[ ('imputer', SimpleImputer()), ('log', FunctionTransformer(np.log1p)) , ('scaler', StandardScaler()), ] )<choose_model_class>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
8,526,543
pipeline = Pipeline([ ('preprocessor', preprocessor), ('variance_drop', VarianceThreshold(threshold=(0.95 *(1 - 0.95)))) , ('voting', 'passthrough'), ] )<choose_model_class>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,526,543
parameters = [ { 'voting': [VotingRegressor([ ('lgbm', LGBMRegressor(random_state=RANDOM_STATE)) , ('xgb', XGBRegressor(random_state=RANDOM_STATE)) ])], 'voting__lgbm__n_estimators': [2000], 'voting__lgbm__max_depth': [12], 'voting__lgbm__learning_rate': [0.01], 'voting__lgbm__num_leaves': [256], 'voting__lgbm__min_child_weight': [12], 'voting__lgbm__feature_fraction': [0.4], 'voting__lgbm__bagging_fraction': [0.7], 'voting__lgbm__bagging_freq': [5], 'voting__lgbm__min_child_samples': [32], 'voting__lgbm__lambda_l1':[9], 'voting__lgbm__lambda_l2': [0.13], 'voting__xgb__n_estimators': [2000], 'voting__xgb__max_depth': [12], 'voting__xgb__learning_rate': [0.01], 'voting__xgb__alpha': [5], 'voting__xgb__gamma': [3], 'voting__xgb__lambda': [3], 'voting__xgb__subsample': [0.8], 'voting__xgb__colsample_bytree': [0.4], } ]<find_best_params>
frames_per_video = 64 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
8,526,543
total = CROSS_VALIDATION * len(ParameterGrid(parameters)) display(f"Number of combination that will be run by the GridSearch: {total}" )<compute_test_metric>
input_size = 224
Deepfake Detection Challenge
8,526,543
custom_scoring = make_scorer( score_func=lambda y, y_pred: mean_squared_error(y, y_pred, squared=False), greater_is_better=False, )<choose_model_class>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
8,526,543
grid_search = GridSearchCV( pipeline, param_grid=parameters, cv=CROSS_VALIDATION, scoring=custom_scoring, n_jobs=-1, verbose=True, )<train_on_grid>
class MyResNeXt(models.resnet.ResNet): def __init__(self, training=True): super(MyResNeXt, self ).__init__(block=models.resnet.Bottleneck, layers=[3, 4, 6, 3], groups=32, width_per_group=4) self.fc = nn.Linear(2048, 1 )
Deepfake Detection Challenge
8,526,543
grid_search.fit(X_train, y_train )<find_best_params>
checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu) model = MyResNeXt().to(gpu) model.load_state_dict(checkpoint) _ = model.eval() del checkpoint
Deepfake Detection Challenge
8,526,543
display(abs(grid_search.best_score_)) display(grid_search.best_params_ )<compute_train_metric>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.481
Deepfake Detection Challenge
8,526,543
preds = grid_search.best_estimator_.predict(X_valid) mean_squared_error(y_valid, preds, squared=False )<load_from_csv>
def predict_on_video_set(videos, num_workers): def process_file(i): filename = videos[i] y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video) return y_pred with ThreadPoolExecutor(max_workers=num_workers)as ex: predictions = ex.map(process_file, range(len(videos))) return list(predictions )
Deepfake Detection Challenge
8,526,543
X_test = pd.read_csv(f"{data_dir}/test.csv" ).set_index('id' ).convert_dtypes() display(X_test.shape) X_test.head(2 )<save_to_csv>
speed_test = False
Deepfake Detection Challenge
8,526,543
preds_test = grid_search.best_estimator_.predict(X_test) output = pd.DataFrame( {'Id': X_test.index, 'target': preds_test}) output.to_csv(f"submission.csv", index=False )<install_modules>
if speed_test: start_time = time.time() speedtest_videos = test_videos[:5] predictions = predict_on_video_set(speedtest_videos, num_workers=4) elapsed = time.time() - start_time print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) )
Deepfake Detection Challenge
8,526,543
!pip install pytorch_tabular !pip install torch_optimizer<import_modules>
predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
8,526,543
import numpy as np import pandas as pd import time import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import KFold from sklearn.metrics import mean_squared_error from sklearn.preprocessing import StandardScaler from pytorch_tabular import TabularModel from pytorch_tabular.models import CategoryEmbeddingModelConfig, NodeConfig, TabNetModelConfig from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig, ExperimentConfig from pytorch_tabular.categorical_encoders import CategoricalEmbeddingTransformer from torch_optimizer import QHAdam import category_encoders as ce from lightgbm import LGBMRegressor from catboost import CatBoostRegressor<load_from_csv>
submission_df_resnext = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df_resnext.to_csv("submission_resnext.csv", index=False )
Deepfake Detection Challenge
8,526,543
df_train = pd.read_csv('.. /input/tabular-playground-series-jan-2021/train.csv') display(df_train.head()) df_test = pd.read_csv('.. /input/tabular-playground-series-jan-2021/test.csv') display(df_test.head()) features = ['cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13', 'cont14']<categorify>
!pip install.. /input/deepfake-xception-trained-model/pytorchcv-0.0.55-py2.py3-none-any.whl --quiet
Deepfake Detection Challenge
8,526,543
enc = KBinsDiscretizer(n_bins=10, encode='ordinal', strategy="quantile") enc.fit(df_train[features]) binned_df_train = enc.transform(df_train[features]) binned_df_test = enc.transform(df_test[features]) for i, feature in enumerate(features): df_train[f"{feature}_binned"] = binned_df_train[:,i] df_test[f"{feature}_binned"] = binned_df_test[:,i]<define_variables>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) len(test_videos )
Deepfake Detection Challenge
8,526,543
def get_configs(train): epochs = 25 batch_size = 512 steps_per_epoch = int(( len(train)//batch_size)*0.9) data_config = DataConfig( target=['target'], continuous_cols=['cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13', 'cont14'], categorical_cols=['cont1_binned', 'cont2_binned', 'cont3_binned', 'cont4_binned', 'cont5_binned', 'cont6_binned', 'cont7_binned', 'cont8_binned', 'cont9_binned', 'cont10_binned', 'cont11_binned', 'cont12_binned', 'cont13_binned', 'cont14_binned'], continuous_feature_transform="quantile_normal" ) trainer_config = TrainerConfig( auto_lr_find=False, batch_size=batch_size, max_epochs=epochs, gpus=1, ) optimizer_config = OptimizerConfig(lr_scheduler="OneCycleLR", lr_scheduler_params={"max_lr":0.005, "epochs": epochs, "steps_per_epoch":steps_per_epoch}) model_config = NodeConfig( task="regression", num_layers=2, num_trees=1024, depth=6, embed_categorical=True, learning_rate = 1e-3, target_range=[(df_train[col].min() ,df_train[col].max())for col in ['target']] ) return data_config, trainer_config, optimizer_config, model_config<drop_column>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
8,526,543
rnd_seed_cv = 1234 rnd_seed_reg = 1234 kf = KFold(n_splits=5, random_state=rnd_seed_cv, shuffle=True) df_train.drop(columns='id', inplace=True) df_test.drop(columns='id', inplace=True) df_test['target'] = 0<train_model>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,526,543
def node(train, valid, df_test): data_config, trainer_config, optimizer_config, model_config = get_configs(train) tabular_model = TabularModel( data_config=data_config, model_config=model_config, optimizer_config=optimizer_config, trainer_config=trainer_config ) tabular_model.fit(train=train, validation=valid, optimizer=QHAdam, optimizer_params={"nus":(0.7, 1.0), "betas":(0.95, 0.998)}) result = tabular_model.evaluate(valid) return np.sqrt(result[0]["test_mean_squared_error"]), tabular_model.predict(valid)["target_prediction"].values, tabular_model.predict(df_test)["target_prediction"].values, tabular_model def lgbm(train, valid, df_test): lgb_model = LGBMRegressor(n_estimators=10000, learning_rate=0.005, early_stopping_rounds=50, feature_pre_filter = False, num_leaves=102, min_child_samples=20, colsample_bytree = 0.4, subsample = 1, subsample_freq = 0, lambda_l1 = 4.6, lambda_l2 = 1.9, random_state=42) lgb_model.fit(train.drop(columns='target'), train['target'], eval_set=(valid.drop(columns='target'),valid.loc[:,'target'])) lgb_preds = lgb_model.predict(valid.drop(columns='target')) score = mean_squared_error(valid['target'].values, lgb_preds, squared=False) return score, lgb_model.predict(valid.drop(columns='target')) , lgb_model.predict(df_test.drop(columns='target')) def cb(train, valid, df_test): best_params = { 'grow_policy': 'Lossguide', 'boosting_type': 'Plain', 'depth': 20, 'l2_leaf_reg': 3.699746597668451, 'min_data_in_leaf': 4, 'random_strength': 4.9263987954247455, 'rsm': 1.0, "n_estimators": 10000, "learning_rate": 0.5, "od_type": "Iter", "od_wait": 50, } catboost_model = CatBoostRegressor(**best_params) catboost_model.fit(train.drop(columns='target'), train['target'], eval_set=(valid.drop(columns='target'),valid.loc[:,'target'])) cb_preds = catboost_model.predict(valid.drop(columns='target')) score = mean_squared_error(valid['target'].values, cb_preds, squared=False) return score, catboost_model.predict(valid.drop(columns='target')) , 
catboost_model.predict(df_test.drop(columns='target'))<feature_engineering>
frames_per_video = 64 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
8,526,543
CV_node = [] CV_lgb = [] CV_cb = [] preds_train_node = [] preds_train_lgb = [] preds_train_cb = [] preds_test_node = [] preds_test_lgb = [] preds_test_cb = [] cross_validated_preds = [] t1 = time.time() for train_index, test_index in kf.split(df_train): train = df_train.iloc[train_index] valid = df_train.iloc[test_index] cv_val = valid.copy() node_score, node_train_pred, node_test_pred, tabular_model = node(train, valid, df_test) CV_node.append(node_score) cv_val['pred_node'] = node_train_pred preds_train_node.append(node_train_pred) preds_test_node.append(node_test_pred) transformer = CategoricalEmbeddingTransformer(tabular_model) train_transform = transformer.fit_transform(train) val_transform = transformer.transform(valid) df_test_transform = transformer.transform(df_test) lgbm_score, lgbm_train_pred, lgbm_test_pred = lgbm(train_transform, val_transform, df_test_transform) CV_lgb.append(lgbm_score) cv_val['pred_lgb'] = lgbm_train_pred preds_train_lgb.append(lgbm_train_pred) preds_test_lgb.append(lgbm_test_pred) cb_score, cb_train_pred, cb_test_pred = cb(train_transform, val_transform, df_test_transform) CV_cb.append(cb_score) cv_val['pred_cb'] = cb_train_pred preds_train_cb.append(cb_train_pred) preds_test_cb.append(cb_test_pred) cross_validated_preds.append(cv_val) t2 = time.time() print('Elapsed time [s]: ', t2-t1 )<compute_test_metric>
input_size = 150
Deepfake Detection Challenge
8,526,543
print('CV performance [RMSE]: ', np.mean(CV_node, axis=0)) print('CV performance [RMSE]: ', np.mean(CV_lgb, axis=0)) print('CV performance [RMSE]: ', np.mean(CV_cb, axis=0))<save_to_csv>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
8,526,543
cross_val_pred_df = pd.concat(cross_validated_preds, sort=False) cross_val_pred_df.to_csv("cross_val_preds.csv") joblib.dump(preds_test_node, "preds_test_node.sav") joblib.dump(preds_test_lgb, "preds_test_lgb.sav") joblib.dump(preds_test_cb, "preds_test_cb.sav" )<prepare_output>
model = get_model("xception", pretrained=False) model = nn.Sequential(*list(model.children())[:-1]) class Pooling(nn.Module): def __init__(self): super(Pooling, self ).__init__() self.p1 = nn.AdaptiveAvgPool2d(( 1,1)) self.p2 = nn.AdaptiveMaxPool2d(( 1,1)) def forward(self, x): x1 = self.p1(x) x2 = self.p2(x) return(x1+x2)* 0.5 model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(( 1,1))) class Head(torch.nn.Module): def __init__(self, in_f, out_f): super(Head, self ).__init__() self.f = nn.Flatten() self.l = nn.Linear(in_f, 512) self.d = nn.Dropout(0.5) self.o = nn.Linear(512, out_f) self.b1 = nn.BatchNorm1d(in_f) self.b2 = nn.BatchNorm1d(512) self.r = nn.ReLU() def forward(self, x): x = self.f(x) x = self.b1(x) x = self.d(x) x = self.l(x) x = self.r(x) x = self.b2(x) x = self.d(x) out = self.o(x) return out class FCN(torch.nn.Module): def __init__(self, base, in_f): super(FCN, self ).__init__() self.base = base self.h1 = Head(in_f, 1) def forward(self, x): x = self.base(x) return self.h1(x) net = [] model = FCN(model, 2048) model = model.cuda() model.load_state_dict(torch.load('.. /input/deepfake-kernel-data/model_50epochs_lr0001_patience5_factor01_batchsize32.pth')) net.append(model )
Deepfake Detection Challenge
8,526,543
avg_cb_pred = np.mean(preds_test_cb, axis=0) avg_lgb_pred = np.mean(preds_test_lgb, axis=0) avg_node_pred = np.mean(preds_test_node, axis=0) pred_test = np.average([avg_node_pred, avg_lgb_pred, avg_cb_pred], axis=0, weights=[-0.15447081, 1.1021915 , 0.06145868] )<load_from_csv>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return y_pred[:n].mean().item() except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.481
Deepfake Detection Challenge
8,526,543
df_sub = pd.read_csv('.. /input/tabular-playground-series-jan-2021/sample_submission.csv') df_sub.target = pred_test df_sub.head()<save_to_csv>
def predict_on_video_set(videos, num_workers): def process_file(i): filename = videos[i] y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video) return y_pred with ThreadPoolExecutor(max_workers=num_workers)as ex: predictions = ex.map(process_file, range(len(videos))) return list(predictions )
Deepfake Detection Challenge
8,526,543
df_sub.to_csv('submission.csv', index=False )<load_from_csv>
speed_test = False
Deepfake Detection Challenge
8,526,543
train = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/train.csv', index_col='id') test = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/test.csv', index_col='id' )<import_modules>
if speed_test: start_time = time.time() speedtest_videos = test_videos[:5] predictions = predict_on_video_set(speedtest_videos, num_workers=4) elapsed = time.time() - start_time print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) )
Deepfake Detection Challenge
8,526,543
import seaborn as sns<filter>
%%time model.eval() predictions = predict_on_video_set(test_videos, num_workers=4 )
Deepfake Detection Challenge
8,526,543
train[train['target'] <= 4.5]<drop_column>
submission_df_xception = pd.DataFrame({"filename": test_videos, "label": predictions}) submission_df_xception.to_csv("submission_xception.csv", index=False )
Deepfake Detection Challenge
8,526,543
train.drop([241352, 284103, 300936, 307139, 355831], axis = 0, inplace=True) train[train['target'] <= 4.5]<drop_column>
submission_df = pd.DataFrame({"filename": test_videos}) submission_df["label"] = 0.51*submission_df_resnext["label"] + 0.5*submission_df_xception["label"]
Deepfake Detection Challenge
8,526,543
target = train['target'] train.drop('target', axis=1, inplace=True )<split>
submission_df.to_csv("submission.csv", index=False )
Deepfake Detection Challenge
8,342,909
X_train, X_valid, y_train, y_valid = train_test_split(train, target, test_size=0.15, random_state=0 )<train_model>
%matplotlib inline
Deepfake Detection Challenge
8,342,909
lgbm = LGBMRegressor(tree_method='gpu_hist',learning_rate=0.07, max_depth=15, random_state=0, n_estimators=2000, n_jobs=-1) lgbm.fit(X_train, y_train, eval_set=[(X_valid, y_valid)]) lgbm.score(X_valid, y_valid )<train_model>
package_path = '.. /input/efficientnet-pytorch/EfficientNet-PyTorch/EfficientNet-PyTorch-master' sys.path.append(package_path)
Deepfake Detection Challenge
8,342,909
xgb = XGBRegressor(learning_rate=0.005, max_depth=8, n_estimators=6000, n_jobs=-1, tree_method='gpu_hist', random_state=3) xgb.fit(X_train, y_train, eval_set=[(X_valid, y_valid)]) xgb.score(X_valid, y_valid )<train_model>
def seed_everything(seed=1234): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True
Deepfake Detection Challenge
8,342,909
ensemble = VotingRegressor(estimators=[("xgb", xgb),("lgbm", lgbm)], weights=[1.1,1]) ensemble.fit(X_train, y_train) ensemble.score(X_valid, y_valid )<train_model>
seed_everything(0 )
Deepfake Detection Challenge
8,342,909
model = ensemble.fit(train, target) model.score(train, target )<predict_on_test>
weight_path = 'efficientnet_b0_epoch_15_loss_0.158.pth' trained_weights_path = os.path.join('.. /input/deepfake-detection-model-weight', weight_path) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") torch.backends.cudnn.benchmark=True
Deepfake Detection Challenge
8,342,909
pred = model.predict(test) pred<load_from_csv>
test_dir = '.. /input/deepfake-detection-challenge/test_videos' os.listdir(test_dir)[:5]
Deepfake Detection Challenge
8,342,909
id = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/test.csv')['id']<save_to_csv>
class ImageTransform: def __init__(self, size, mean, std): self.data_transform = transforms.Compose([ transforms.Resize(( size, size), interpolation=Image.BILINEAR), transforms.ToTensor() , transforms.Normalize(mean, std) ]) def __call__(self, img): return self.data_transform(img )
Deepfake Detection Challenge
8,342,909
output = pd.DataFrame({'id': id, 'target': pred}) output.to_csv("submission.csv", index=False) print("saved" )<import_modules>
class DeepfakeDataset(Dataset): def __init__(self, file_list, device, detector, transform, img_num=20, frame_window=10): self.file_list = file_list self.device = device self.detector = detector self.transform = transform self.img_num = img_num self.frame_window = frame_window def __len__(self): return len(self.file_list) def __getitem__(self, idx): mov_path = self.file_list[idx] img_list = [] try: all_image = get_img_from_mov(mov_path, self.img_num, self.frame_window) except: return [], mov_path.split('/')[-1] for image in all_image: try: _image = image[np.newaxis, :, :, :] boxes, probs = self.detector.detect(_image, landmarks=False) x = int(boxes[0][0][0]) y = int(boxes[0][0][1]) z = int(boxes[0][0][2]) w = int(boxes[0][0][3]) image = image[y-15:w+15, x-15:z+15] image = Image.fromarray(image) image = self.transform(image) img_list.append(image) except: img_list.append(None) img_list = [c for c in img_list if c is not None] return img_list, mov_path.split('/')[-1]
Deepfake Detection Challenge
8,342,909
for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) input_path = Path('/kaggle/input/tabular-playground-series-jan-2021/' )<load_from_csv>
model = EfficientNet.from_name('efficientnet-b0') model._fc = nn.Linear(in_features=model._fc.in_features, out_features=1) model.load_state_dict(torch.load(trained_weights_path, map_location=torch.device(device)) )
Deepfake Detection Challenge
8,342,909
train = pd.read_csv(input_path / 'train.csv', index_col='id') print(train.shape) train.head()<load_from_csv>
test_file = [os.path.join(test_dir, path)for path in os.listdir(test_dir)]
Deepfake Detection Challenge
8,342,909
test = pd.read_csv(input_path / 'test.csv', index_col='id') print(test.shape) test.head()<count_missing_values>
def predict_dfdc(dataset, model): torch.cuda.empty_cache() pred_list = [] path_list = [] model = model.to(device) model.eval() with torch.no_grad() : for i in tqdm(range(len(dataset))): pred = 0 imgs, mov_path = dataset.__getitem__(i) if len(imgs)== 0: pred_list.append(0.5) path_list.append(mov_path) continue for i in range(len(imgs)) : img = imgs[i] output = model(img.unsqueeze(0 ).to(device)) pred += torch.sigmoid(output ).item() / len(imgs) pred_list.append(pred) path_list.append(mov_path) torch.cuda.empty_cache() return path_list, pred_list
Deepfake Detection Challenge
8,342,909
train.isnull().sum()<feature_engineering>
res = pd.DataFrame({ 'filename': path_list, 'label': pred_list, }) res.sort_values(by='filename', ascending=True, inplace=True )
Deepfake Detection Challenge
8,342,909
<feature_engineering><EOS>
res.to_csv('submission.csv', index=False )
Deepfake Detection Challenge
7,469,803
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<split>
%%capture !pip install /kaggle/input/facenet-pytorch-vggface2/facenet_pytorch-1.0.1-py3-none-any.whl !mkdir -p /tmp/.cache/torch/checkpoints/ !cp /kaggle/input/facenet-pytorch-vggface2/20180402-114759-vggface2-logits.pth /tmp/.cache/torch/checkpoints/vggface2_DG3kwML46X.pt !cp /kaggle/input/facenet-pytorch-vggface2/20180402-114759-vggface2-features.pth /tmp/.cache/torch/checkpoints/vggface2_G5aNV2VSMn.pt
Deepfake Detection Challenge
7,469,803
X_train, X_test, y_train, y_test = train_test_split(trainNorm, target, test_size=0.2, random_state=7) print('X_train: ', X_train.shape) print('X_test: ', X_test.shape) print('y_train: ', y_train.shape) print('y_test: ', y_test.shape )<compute_train_metric>
device = 'cuda:0' if torch.cuda.is_available() else 'cpu' print(f'Running on device: {device}' )
Deepfake Detection Challenge
7,469,803
def rmse_cv(model,X,y): rmse = np.sqrt(-cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=5)) return rmse<define_variables>
mtcnn = MTCNN(device=device ).eval() resnet = InceptionResnetV1(pretrained='vggface2', num_classes=2, device=device ).eval()
Deepfake Detection Challenge
7,469,803
models = [LinearRegression() , Ridge() , Lasso() , ElasticNet() , SGDRegressor() , BayesianRidge() , cb.CatBoostRegressor() , RandomForestRegressor() , ] names = ["LR", "Ridge", "Lasso", "ElasticNet", "SGD","BayesianRidge", "catboost","RandomForestRegressor"]<compute_train_metric>
bias = -0.4 weight = 0.068235746 submission = [] for filename, x_i in zip(filenames, X): if x_i is not None and len(x_i)== 10: prob = 1 /(1 + np.exp(-(bias +(weight * x_i ).sum()))) else: prob = 0.6 submission.append([os.path.basename(filename), prob] )
Deepfake Detection Challenge
7,469,803
<compute_train_metric><EOS>
submission = pd.DataFrame(submission, columns=['filename', 'label']) submission.sort_values('filename' ).to_csv('submission.csv', index=False )
Deepfake Detection Challenge
7,025,346
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<save_to_csv>
!pip install /kaggle/input/dfdcdataset/dlib-19.19.0-cp36-cp36m-linux_x86_64.whl
Deepfake Detection Challenge
7,025,346
<choose_model_class><EOS>
DATA_PREFIX = '/kaggle/input' SKIP_FRAMES = 75 detector = dlib.cnn_face_detection_model_v1(os.path.join(DATA_PREFIX, 'dfdcdataset', 'mmod_human_face_detector.dat')) sp = dlib.shape_predictor(os.path.join(DATA_PREFIX, 'dfdcdataset', 'shape_predictor_5_face_landmarks.dat')) predictor = dlib.deep_fake_detection_model_v1(os.path.join(DATA_PREFIX, 'dfdcdataset', 'deepfake_detector.dnn')) def align_face(frame, detection_sp): x_center = int(( detection_sp.part(0 ).x + detection_sp.part(2 ).x + detection_sp.part(4 ).x)/ 3) y_center = int(( detection_sp.part(4 ).y + detection_sp.part(0 ).y + detection_sp.part(2 ).y)/ 3) w = 2 * abs(detection_sp.part(0 ).x - detection_sp.part(2 ).x) h = w shape = frame.shape face_crop = frame[ max(int(y_center - h), 0):min(int(y_center + h), shape[0]), max(int(x_center - w), 0):min(int(x_center + w), shape[1]) ] return cv2.resize(face_crop,(150,150)) def align_face_dlib(frame, detection_sp): detections = dlib.full_object_detections() detections.append(detection_sp) return dlib.get_face_chips(frame, detections, size=150)[0] def predict_fake(face): return predictor.predict(face)[0] def process_frame(frame): labels = [] dets = detector(frame, 1) batch_faces = dlib.full_object_detections() for k,d in enumerate(dets): face = align_face_dlib(frame, sp(frame, d.rect)) labels.append(predict_fake(face)) return labels def process_video(video_filename): frame_labels = [] frames = [] cap = cv2.VideoCapture(video_filename) frame_count = 0 while cap.isOpened() : ret = cap.grab() frame_count += 1 if not ret: break if frame_count % SKIP_FRAMES: continue _, frame = cap.retrieve() frame_labels.extend(process_frame(frame)) cap.release() fakeness = statistics.mean(frame_labels) fakeness = 0.3 +(fakeness * 0.4) return fakeness def single_log_loss(prediction, groundtruth): return groundtruth * math.log(prediction)+(1-groundtruth)* math.log(1-prediction) def estimate_loss(predictions, all_correct=True): result = 0 for p in predictions: if all_correct: result += 
single_log_loss(p, 1 if p > 0.5 else 0) else: result += single_log_loss(p, 0 if p > 0.5 else 1) return -result/len(predictions) predictions = [] filenames = glob.glob(os.path.join(DATA_PREFIX, 'deepfake-detection-challenge/test_videos/*.mp4')) sub = pd.read_csv(os.path.join(DATA_PREFIX, 'deepfake-detection-challenge/sample_submission.csv')) sub = sub.set_index('filename', drop=False) print('Initialize submission') for filename in tqdm.tqdm(filenames): fn = filename.split('/')[-1] sub.loc[fn, 'label'] = 0.5 print('CUDA usage: {}'.format(dlib.DLIB_USE_CUDA)) for filename in tqdm.tqdm(filenames): fn = filename.split('/')[-1] sub.loc[fn, 'label'] = 0.5 try: start = timer() prediction = process_video(filename) sub.loc[fn, 'label'] = prediction sub.to_csv('submission.csv', index=False) predictions.append(prediction) print('Processed video {}, label={}, time={}'.format(filename, prediction, timedelta(seconds=timer() -start))) print('Possible lost: best={}, worse={}'.format(estimate_loss(predictions), estimate_loss(predictions, False))) except Exception as error: print('Failed to process {}'.format(filename)) sub.to_csv('submission.csv', index=False )
Deepfake Detection Challenge
8,201,023
<SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<train_model>
%matplotlib inline warnings.filterwarnings("ignore" )
Deepfake Detection Challenge
8,201,023
history = model.fit(X_train, y_train, validation_split=0.2, verbose=1, epochs=10 )<compute_test_metric>
test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/" test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"]) frame_h = 5 frame_l = 5 len(test_videos )
Deepfake Detection Challenge
8,201,023
final_predictions = model.predict(X_test) final_mse = mean_squared_error(y_test, final_predictions) final_rmse = np.sqrt(final_mse) print("RMSE on X_test ", round(final_rmse, 4))<predict_on_test>
print("PyTorch version:", torch.__version__) print("CUDA version:", torch.version.cuda) print("cuDNN version:", torch.backends.cudnn.version() )
Deepfake Detection Challenge
8,201,023
final_predictions = model.predict(testNorm) final_predictions.shape<load_from_csv>
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") gpu
Deepfake Detection Challenge
8,201,023
subm = pd.read_csv(input_path / 'sample_submission.csv') print(subm.shape) subm.head()<prepare_output>
facedet = BlazeFace().to(gpu) facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth") facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy") _ = facedet.train(False )
Deepfake Detection Challenge
8,201,023
id_col=subm.id subm=id_col.to_frame() subm['target'] = final_predictions subm.set_index('id',inplace=True) subm.head()<save_to_csv>
frames_per_video = 90 video_reader = VideoReader() video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video) face_extractor = FaceExtractor(video_read_fn, facedet )
Deepfake Detection Challenge
8,201,023
subm.to_csv('NNSubmission.csv' )<load_pretrained>
input_size = 250
Deepfake Detection Challenge
8,201,023
weight_base64 = base64.b64encode(bz2.compress(pickle.dumps(torch.load('.. /input/alphageese-training/alphageese_epoch30.pth', map_location=torch.device('cpu'))))) w = "weight= %s"%weight_base64 %store w >submission.py<set_options>
mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] normalize_transform = Normalize(mean, std )
Deepfake Detection Challenge
8,201,023
%%writefile -a submission.py <set_options>
class MyResNeXt(models.resnet.ResNet): def __init__(self, training=True): super(MyResNeXt, self ).__init__(block=models.resnet.Bottleneck, layers=[3, 4, 6, 3], groups=32, width_per_group=4) self.fc = nn.Linear(2048, 1 )
Deepfake Detection Challenge
8,201,023
%%writefile -a submission.py class MCTS() : def __init__(self, game, nn_agent, eps=1e-8, cpuct=1.0): self.game = game self.nn_agent = nn_agent self.eps = eps self.cpuct = cpuct self.Qsa = {} self.Nsa = {} self.Ns = {} self.Ps = {} self.Vs = {} self.last_obs = None def getActionProb(self, obs, timelimit=1.0): start_time = time.time() while time.time() - start_time < timelimit: self.search(obs, self.last_obs) s = self.game.stringRepresentation(obs) i = obs.index counts = [ self.Nsa[(s, i, a)] if(s, i, a)in self.Nsa else 0 for a in range(self.game.getActionSize()) ] prob = counts / np.sum(counts) self.last_obs = obs return prob def search(self, obs, last_obs): s = self.game.stringRepresentation(obs) if s not in self.Ns: values = [-10] * 4 for i in range(4): if len(obs.geese[i])== 0: continue self.Ps[s, i], values[i] = self.nn_agent.predict(obs, last_obs, i) valids = self.game.getValidMoves(obs, last_obs, i) self.Ps[s, i] = self.Ps[s, i] * valids sum_Ps_s = np.sum(self.Ps[s, i]) if sum_Ps_s > 0: self.Ps[s, i] /= sum_Ps_s self.Vs[s, i] = valids self.Ns[s] = 0 return values best_acts = [None] * 4 for i in range(4): if len(obs.geese[i])== 0: continue valids = self.Vs[s, i] cur_best = -float('inf') best_act = self.game.actions[-1] for a in range(self.game.getActionSize()): if valids[a]: if(s, i, a)in self.Qsa: u = self.Qsa[(s, i, a)] + self.cpuct * self.Ps[s, i][a] * math.sqrt( self.Ns[s])/(1 + self.Nsa[(s, i, a)]) else: u = self.cpuct * self.Ps[s, i][a] * math.sqrt( self.Ns[s] + self.eps) if u > cur_best: cur_best = u best_act = self.game.actions[a] best_acts[i] = best_act next_obs = self.game.getNextState(obs, last_obs, best_acts) values = self.search(next_obs, obs) for i in range(4): if len(obs.geese[i])== 0: continue a = self.game.actions.index(best_acts[i]) v = values[i] if(s, i, a)in self.Qsa: self.Qsa[(s, i, a)] =(self.Nsa[(s, i, a)] * self.Qsa[ (s, i, a)] + v)/(self.Nsa[(s, i, a)] + 1) self.Nsa[(s, i, a)] += 1 else: self.Qsa[(s, i, a)] = v self.Nsa[(s, i, a)] = 1 
self.Ns[s] += 1 return values<choose_model_class>
checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu) model = MyResNeXt().to(gpu) model.load_state_dict(checkpoint) _ = model.eval() del checkpoint
Deepfake Detection Challenge
8,201,023
%%writefile -a submission.py class HungryGeese(object): def __init__(self, rows=7, columns=11, actions=[Action.NORTH, Action.SOUTH, Action.WEST, Action.EAST], hunger_rate=40): self.rows = rows self.columns = columns self.actions = actions self.hunger_rate = hunger_rate def getActionSize(self): return len(self.actions) def getNextState(self, obs, last_obs, directions): next_obs = pickle.loads(pickle.dumps(obs, -1)) next_obs.step += 1 geese = next_obs.geese food = next_obs.food for i in range(4): goose = geese[i] if len(goose)== 0: continue head = translate(goose[0], directions[i], self.columns, self.rows) if last_obs is not None and head == last_obs.geese[i][0]: geese[i] = [] continue if head in food: food.remove(head) else: goose.pop() goose.insert(0, head) if next_obs.step % self.hunger_rate == 0: if len(goose)> 0: goose.pop() goose_positions = histogram( position for goose in geese for position in goose ) for i in range(4): if len(geese[i])> 0: head = geese[i][0] if goose_positions[head] > 1: geese[i] = [] return next_obs def getValidMoves(self, obs, last_obs, index): geese = obs.geese pos = geese[index][0] obstacles = {position for goose in geese for position in goose[:-1]} if last_obs is not None: obstacles.add(last_obs.geese[index][0]) valid_moves = [ translate(pos, action, self.columns, self.rows)not in obstacles for action in self.actions ] return valid_moves def stringRepresentation(self, obs): return str(obs.geese + obs.food )<train_on_grid>
def predict_on_video(video_path, batch_size): try: faces = face_extractor.process_video(video_path) face_extractor.keep_only_best_face(faces) if len(faces)> 0: x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8) n = 0 for frame_data in faces: for face in frame_data["faces"]: resized_face = isotropically_resize_image(face, input_size) resized_face = make_square_image(resized_face) if n < batch_size: x[n] = resized_face n += 1 else: print("WARNING: have %d faces but batch size is %d" %(n, batch_size)) if n > 0: x = torch.tensor(x, device=gpu ).float() x = x.permute(( 0, 3, 1, 2)) for i in range(len(x)) : x[i] = normalize_transform(x[i] / 255.) with torch.no_grad() : y_pred = model(x) y_pred = torch.sigmoid(y_pred.squeeze()) return np.percentile(y_pred[:n].to('cpu'), 52, interpolation="nearest") except Exception as e: print("Prediction error on video %s: %s" %(video_path, str(e))) return 0.481
Deepfake Detection Challenge
8,201,023
%%writefile -a submission.py
class TorusConv2d(nn.Module):
    """2-D convolution with toroidal (wrap-around) padding, matching the
    Hungry Geese board where left/right and top/bottom edges are adjacent."""

    def __init__(self, input_dim, output_dim, kernel_size, bn):
        super().__init__()
        # Half-kernel sizes: how many rows/columns to wrap from each edge.
        self.edge_size = (kernel_size[0] // 2, kernel_size[1] // 2)
        self.conv = nn.Conv2d(input_dim, output_dim, kernel_size=kernel_size)
        self.bn = nn.BatchNorm2d(output_dim) if bn else None

    def forward(self, x):
        # Wrap columns (dim=3) then rows (dim=2) so the valid convolution
        # produces an output of the same spatial size as the input.
        h = torch.cat([x[:, :, :, -self.edge_size[1]:], x, x[:, :, :, :self.edge_size[1]]], dim=3)
        h = torch.cat([h[:, :, -self.edge_size[0]:], h, h[:, :, :self.edge_size[0]]], dim=2)
        h = self.conv(h)
        h = self.bn(h) if self.bn is not None else h
        return h


class GeeseNet(nn.Module):
    """Residual torus-CNN producing a policy over 4 moves and a scalar value.

    Input is a 17-plane 7x11 board encoding (see NNAgent._make_input).
    """

    def __init__(self):
        super().__init__()
        layers, filters = 12, 32
        self.conv0 = TorusConv2d(17, filters, (3, 3), True)
        self.blocks = nn.ModuleList([TorusConv2d(filters, filters, (3, 3), True) for _ in range(layers)])
        self.head_p = nn.Linear(filters, 4, bias=False)
        self.head_v = nn.Linear(filters * 2, 1, bias=False)

    def forward(self, x):
        h = F.relu_(self.conv0(x))
        for block in self.blocks:
            # Pre-activation residual tower.
            h = F.relu_(h + block(h))
        # Policy head reads features at the agent's own head cell
        # (x[:, :1] is the own-head plane acting as a spatial mask);
        # value head additionally sees the board-wide average.
        h_head = (h * x[:, :1]).view(h.size(0), h.size(1), -1).sum(-1)
        h_avg = h.view(h.size(0), h.size(1), -1).mean(-1)
        p = torch.softmax(self.head_p(h_head), 1)
        v = torch.tanh(self.head_v(torch.cat([h_head, h_avg], 1)))
        return p, v


class NNAgent():
    """Thin inference wrapper: encodes an observation and runs GeeseNet."""

    def __init__(self, state_dict):
        self.model = GeeseNet()
        self.model.load_state_dict(state_dict)
        self.model.eval()

    def predict(self, obs, last_obs, index):
        """Return (policy ndarray of shape (4,), scalar value) for goose
        `index` in observation `obs`."""
        x = self._make_input(obs, last_obs, index)
        with torch.no_grad():
            xt = torch.from_numpy(x).unsqueeze(0)  # add batch dimension
            p, v = self.model(xt)
            return p.squeeze(0).detach().numpy(), v.item()

    def _make_input(self, obs, last_obs, index):
        """Encode the board as 17 binary planes of 7x11 = 77 cells, rotated
        so that plane groups are relative to `index` (own goose first):

        planes 0-3:  heads, 4-7: tails, 8-11: whole bodies,
        planes 12-15: previous heads (reverse-move detection),
        plane 16: food.
        """
        b = np.zeros((17, 7 * 11), dtype=np.float32)

        for p, pos_list in enumerate(obs.geese):
            # pos_list[:1] / [-1:] are empty for dead geese, so nothing is set.
            for pos in pos_list[:1]:
                b[0 + (p - index) % 4, pos] = 1
            for pos in pos_list[-1:]:
                b[4 + (p - index) % 4, pos] = 1
            for pos in pos_list:
                b[8 + (p - index) % 4, pos] = 1

        if last_obs is not None:
            for p, pos_list in enumerate(last_obs.geese):
                for pos in pos_list[:1]:
                    b[12 + (p - index) % 4, pos] = 1

        for pos in obs.food:
            b[16, pos] = 1

        return b.reshape(-1, 7, 11)
def predict_on_video_set(videos, num_workers): def process_file(i): filename = videos[i] y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video) return y_pred with ThreadPoolExecutor(max_workers=num_workers)as ex: predictions = ex.map(process_file, range(len(videos))) return list(predictions )
Deepfake Detection Challenge