Upload 3 files
Browse files- ucf.yaml +131 -0
- ucf_best.pth +3 -0
- xception.yaml +86 -0
ucf.yaml
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# log dir
|
| 2 |
+
log_dir: /data/home/zhiyuanyan/DeepfakeBench/debug_logs/ucf
|
| 3 |
+
|
| 4 |
+
# model setting
|
| 5 |
+
pretrained: /teamspace/studios/this_studio/DeepfakeBench/training/pretrained/xception-b5690688.pth # path to a pre-trained model, if using one
|
| 6 |
+
# pretrained: '/home/zhiyuanyan/.cache/torch/hub/checkpoints/resnet34-b627a593.pth' # path to a pre-trained model, if using one
|
| 7 |
+
model_name: ucf # model name
|
| 8 |
+
backbone_name: xception # backbone name
|
| 9 |
+
encoder_feat_dim: 512 # feature dimension of the backbone
|
| 10 |
+
|
| 11 |
+
#backbone setting
|
| 12 |
+
backbone_config:
|
| 13 |
+
mode: adjust_channel
|
| 14 |
+
num_classes: 2
|
| 15 |
+
inc: 3
|
| 16 |
+
dropout: false
|
| 17 |
+
|
| 18 |
+
# dataset
|
| 19 |
+
all_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT, FaceShifter, DeepFakeDetection, Celeb-DF-v1, Celeb-DF-v2, DFDCP, DFDC, DeeperForensics-1.0, UADFV]
|
| 20 |
+
train_dataset: [FF-F2F, FF-DF, FF-FS, FF-NT]
|
| 21 |
+
test_dataset: [Celeb-DF-v2]
|
| 22 |
+
dataset_type: pair
|
| 23 |
+
|
| 24 |
+
compression: c23 # compression-level for videos
|
| 25 |
+
train_batchSize: 16 # training batch size
|
| 26 |
+
test_batchSize: 32 # test batch size
|
| 27 |
+
workers: 8 # number of data loading workers
|
| 28 |
+
frame_num: {'train': 32, 'test': 32} # number of frames to use per video in training and testing
|
| 29 |
+
resolution: 256 # resolution of output image to network
|
| 30 |
+
with_mask: false # whether to include mask information in the input
|
| 31 |
+
with_landmark: false # whether to include facial landmark information in the input
|
| 32 |
+
save_ckpt: true # whether to save checkpoint
|
| 33 |
+
save_feat: true # whether to save features
|
| 34 |
+
|
| 35 |
+
# label settings
|
| 36 |
+
label_dict:
|
| 37 |
+
# DFD
|
| 38 |
+
DFD_fake: 1
|
| 39 |
+
DFD_real: 0
|
| 40 |
+
FaceShifter: 1
|
| 41 |
+
FF-FH: 1
|
| 42 |
+
# FF++ + FaceShifter(FF-real+FF-FH)
|
| 43 |
+
# ucf specific label setting
|
| 44 |
+
FF-DF: 1
|
| 45 |
+
FF-F2F: 2
|
| 46 |
+
FF-FS: 3
|
| 47 |
+
FF-NT: 4
|
| 48 |
+
FF-real: 0
|
| 49 |
+
# CelebDF
|
| 50 |
+
CelebDFv1_real: 0
|
| 51 |
+
CelebDFv1_fake: 1
|
| 52 |
+
CelebDFv2_real: 0
|
| 53 |
+
CelebDFv2_fake: 1
|
| 54 |
+
# DFDCP
|
| 55 |
+
DFDCP_Real: 0
|
| 56 |
+
DFDCP_FakeA: 1
|
| 57 |
+
DFDCP_FakeB: 1
|
| 58 |
+
# DFDC
|
| 59 |
+
DFDC_Fake: 1
|
| 60 |
+
DFDC_Real: 0
|
| 61 |
+
# DeeperForensics-1.0
|
| 62 |
+
DF_fake: 1
|
| 63 |
+
DF_real: 0
|
| 64 |
+
# UADFV
|
| 65 |
+
UADFV_Fake: 1
|
| 66 |
+
UADFV_Real: 0
|
| 67 |
+
# roop
|
| 68 |
+
roop_Fake: 1
|
| 69 |
+
roop_Real: 0
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
# data augmentation
|
| 74 |
+
use_data_augmentation: true # Add this flag to enable/disable data augmentation
|
| 75 |
+
data_aug:
|
| 76 |
+
flip_prob: 0.5
|
| 77 |
+
rotate_prob: 0.5
|
| 78 |
+
rotate_limit: [-10, 10]
|
| 79 |
+
blur_prob: 0.5
|
| 80 |
+
blur_limit: [3, 7]
|
| 81 |
+
brightness_prob: 0.5
|
| 82 |
+
brightness_limit: [-0.1, 0.1]
|
| 83 |
+
contrast_limit: [-0.1, 0.1]
|
| 84 |
+
quality_lower: 40
|
| 85 |
+
quality_upper: 100
|
| 86 |
+
|
| 87 |
+
# mean and std for normalization
|
| 88 |
+
mean: [0.5, 0.5, 0.5]
|
| 89 |
+
std: [0.5, 0.5, 0.5]
|
| 90 |
+
|
| 91 |
+
# optimizer config
|
| 92 |
+
optimizer:
|
| 93 |
+
# choose between 'adam' and 'sgd'
|
| 94 |
+
type: adam
|
| 95 |
+
adam:
|
| 96 |
+
lr: 0.0002 # learning rate
|
| 97 |
+
beta1: 0.9 # beta1 for Adam optimizer
|
| 98 |
+
beta2: 0.999 # beta2 for Adam optimizer
|
| 99 |
+
eps: 0.00000001 # epsilon for Adam optimizer
|
| 100 |
+
weight_decay: 0.0005 # weight decay for regularization
|
| 101 |
+
amsgrad: false
|
| 102 |
+
sgd:
|
| 103 |
+
lr: 0.0002 # learning rate
|
| 104 |
+
momentum: 0.9 # momentum for SGD optimizer
|
| 105 |
+
weight_decay: 0.0005 # weight decay for regularization
|
| 106 |
+
|
| 107 |
+
# training config
|
| 108 |
+
lr_scheduler: null # learning rate scheduler
|
| 109 |
+
nEpochs: 5 # number of epochs to train for
|
| 110 |
+
start_epoch: 0 # manual epoch number (useful for restarts)
|
| 111 |
+
save_epoch: 1 # interval epochs for saving models
|
| 112 |
+
rec_iter: 100 # interval iterations for recording
|
| 113 |
+
logdir: ./logs # folder to output images and logs
|
| 114 |
+
manualSeed: 1024 # manual seed for random number generation
|
| 115 |
+
save_ckpt: false # whether to save checkpoint  # NOTE: duplicate key — 'save_ckpt: true' is also set earlier in this file; most parsers keep this last value (false). Remove one of the two to make the intent explicit.
|
| 116 |
+
|
| 117 |
+
# loss function
|
| 118 |
+
loss_func:
|
| 119 |
+
cls_loss: cross_entropy # loss function to use
|
| 120 |
+
spe_loss: cross_entropy
|
| 121 |
+
con_loss: contrastive_regularization
|
| 122 |
+
rec_loss: l1loss
|
| 123 |
+
losstype: null
|
| 124 |
+
|
| 125 |
+
# metric
|
| 126 |
+
metric_scoring: auc # metric for evaluation (auc, acc, eer, ap)
|
| 127 |
+
|
| 128 |
+
# cuda
|
| 129 |
+
|
| 130 |
+
cuda: true # whether to use CUDA acceleration
|
| 131 |
+
cudnn: true # whether to use CuDNN for convolution operations
|
ucf_best.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4afa2517feeb473024e1b73049bfa61ce446eb4da14d17c864f2e2a76b3223e5
|
| 3 |
+
size 188006161
|
xception.yaml
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# log dir
|
| 2 |
+
log_dir: /teamspace/studios/this_studio/DeepfakeBench/logs/testing_bench
|
| 3 |
+
|
| 4 |
+
# model setting
|
| 5 |
+
pretrained: /teamspace/studios/this_studio/DeepfakeBench/training/pretrained/xception-b5690688.pth # path to a pre-trained model, if using one
|
| 6 |
+
model_name: xception # model name
|
| 7 |
+
backbone_name: xception # backbone name
|
| 8 |
+
|
| 9 |
+
#backbone setting
|
| 10 |
+
backbone_config:
|
| 11 |
+
mode: original
|
| 12 |
+
num_classes: 2
|
| 13 |
+
inc: 3
|
| 14 |
+
dropout: false
|
| 15 |
+
|
| 16 |
+
# dataset
|
| 17 |
+
all_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT, FaceShifter, DeepFakeDetection, Celeb-DF-v1, Celeb-DF-v2, DFDCP, DFDC, DeeperForensics-1.0, UADFV]
|
| 18 |
+
train_dataset: [Celeb-DF-v1, DFDCP]
|
| 19 |
+
test_dataset: [UADFV]
|
| 20 |
+
|
| 21 |
+
compression: c23 # compression-level for videos
|
| 22 |
+
train_batchSize: 32 # training batch size
|
| 23 |
+
test_batchSize: 32 # test batch size
|
| 24 |
+
workers: 8 # number of data loading workers
|
| 25 |
+
frame_num: {'train': 32, 'test': 32} # number of frames to use per video in training and testing
|
| 26 |
+
resolution: 256 # resolution of output image to network
|
| 27 |
+
with_mask: false # whether to include mask information in the input
|
| 28 |
+
with_landmark: false # whether to include facial landmark information in the input
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# data augmentation
|
| 32 |
+
use_data_augmentation: true # Add this flag to enable/disable data augmentation
|
| 33 |
+
data_aug:
|
| 34 |
+
flip_prob: 0.5
|
| 35 |
+
rotate_prob: 0.0
|
| 36 |
+
rotate_limit: [-10, 10]
|
| 37 |
+
blur_prob: 0.5
|
| 38 |
+
blur_limit: [3, 7]
|
| 39 |
+
brightness_prob: 0.5
|
| 40 |
+
brightness_limit: [-0.1, 0.1]
|
| 41 |
+
contrast_limit: [-0.1, 0.1]
|
| 42 |
+
quality_lower: 40
|
| 43 |
+
quality_upper: 100
|
| 44 |
+
|
| 45 |
+
# mean and std for normalization
|
| 46 |
+
mean: [0.5, 0.5, 0.5]
|
| 47 |
+
std: [0.5, 0.5, 0.5]
|
| 48 |
+
|
| 49 |
+
# optimizer config
|
| 50 |
+
optimizer:
|
| 51 |
+
# choose between 'adam' and 'sgd'
|
| 52 |
+
type: adam
|
| 53 |
+
adam:
|
| 54 |
+
lr: 0.0002 # learning rate
|
| 55 |
+
beta1: 0.9 # beta1 for Adam optimizer
|
| 56 |
+
beta2: 0.999 # beta2 for Adam optimizer
|
| 57 |
+
eps: 0.00000001 # epsilon for Adam optimizer
|
| 58 |
+
weight_decay: 0.0005 # weight decay for regularization
|
| 59 |
+
amsgrad: false
|
| 60 |
+
sgd:
|
| 61 |
+
lr: 0.0002 # learning rate
|
| 62 |
+
momentum: 0.9 # momentum for SGD optimizer
|
| 63 |
+
weight_decay: 0.0005 # weight decay for regularization
|
| 64 |
+
|
| 65 |
+
# training config
|
| 66 |
+
lr_scheduler: null # learning rate scheduler
|
| 67 |
+
nEpochs: 10 # number of epochs to train for
|
| 68 |
+
start_epoch: 0 # manual epoch number (useful for restarts)
|
| 69 |
+
save_epoch: 1 # interval epochs for saving models
|
| 70 |
+
rec_iter: 100 # interval iterations for recording
|
| 71 |
+
logdir: ./logs # folder to output images and logs
|
| 72 |
+
manualSeed: 1024 # manual seed for random number generation
|
| 73 |
+
save_ckpt: true # whether to save checkpoint
|
| 74 |
+
save_feat: true # whether to save features
|
| 75 |
+
|
| 76 |
+
# loss function
|
| 77 |
+
loss_func: cross_entropy # loss function to use
|
| 78 |
+
losstype: null
|
| 79 |
+
|
| 80 |
+
# metric
|
| 81 |
+
metric_scoring: auc # metric for evaluation (auc, acc, eer, ap)
|
| 82 |
+
|
| 83 |
+
# cuda
|
| 84 |
+
|
| 85 |
+
cuda: true # whether to use CUDA acceleration
|
| 86 |
+
cudnn: true # whether to use CuDNN for convolution operations
|