# log dir 
log_dir: /data/home/zhiyuanyan/DeepfakeBench/debug_logs/ucf

# model setting
pretrained: /teamspace/studios/this_studio/DeepfakeBench/training/pretrained/xception-b5690688.pth   # path to a pre-trained model, if using one
# pretrained: '/home/zhiyuanyan/.cache/torch/hub/checkpoints/resnet34-b627a593.pth'   # path to a pre-trained model, if using one
model_name: ucf   # model name
backbone_name: xception  # backbone name
encoder_feat_dim: 512  # feature dimension of the backbone
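# A minimal sketch of how the `pretrained` weights above could be loaded into
# the backbone (assumed `backbone` variable and loading style; not necessarily
# how DeepfakeBench itself wires this up). Uncommented, it is plain PyTorch:
#   import torch
#   state = torch.load('training/pretrained/xception-b5690688.pth', map_location='cpu')
#   backbone.load_state_dict(state, strict=False)  # strict=False tolerates the adjusted head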

# backbone setting
backbone_config:
  mode: adjust_channel
  num_classes: 2
  inc: 3
  dropout: false

# dataset
all_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT, FaceShifter, DeepFakeDetection, Celeb-DF-v1, Celeb-DF-v2, DFDCP, DFDC, DeeperForensics-1.0, UADFV]
train_dataset: [FF-F2F, FF-DF, FF-FS, FF-NT]
test_dataset: [Celeb-DF-v2]
dataset_type: pair

compression: c23  # compression-level for videos
train_batchSize: 16   # training batch size
test_batchSize: 32   # test batch size
workers: 8   # number of data loading workers
frame_num: {'train': 32, 'test': 32}   # number of frames to use per video in training and testing
resolution: 256   # resolution of output image to network
with_mask: false   # whether to include mask information in the input
with_landmark: false   # whether to include facial landmark information in the input
save_ckpt: true   # whether to save checkpoint
save_feat: true   # whether to save features
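# Rough sketch of the dataloader these settings imply (assumed variable
# `train_set`; the real DeepfakeBench builder may differ). Note that
# dataset_type: pair suggests each item bundles a real/fake pair, so the
# number of images per step can exceed train_batchSize:
#   from torch.utils.data import DataLoader
#   train_loader = DataLoader(train_set, batch_size=16, shuffle=True,
#                             num_workers=8, drop_last=True)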

# label settings
label_dict:
  # DFD
  DFD_fake: 1
  DFD_real: 0
  # FaceShifter (FF-real + FF-FH)
  FaceShifter: 1
  FF-FH: 1
  # FF++: ucf-specific fine-grained labels (one per manipulation method)
  FF-DF: 1
  FF-F2F: 2
  FF-FS: 3
  FF-NT: 4
  FF-real: 0
  # CelebDF
  CelebDFv1_real: 0
  CelebDFv1_fake: 1
  CelebDFv2_real: 0
  CelebDFv2_fake: 1
  # DFDCP
  DFDCP_Real: 0
  DFDCP_FakeA: 1
  DFDCP_FakeB: 1
  # DFDC
  DFDC_Fake: 1
  DFDC_Real: 0
  # DeeperForensics-1.0
  DF_fake: 1
  DF_real: 0
  # UADFV
  UADFV_Fake: 1
  UADFV_Real: 0
  # roop
  roop_Fake: 1
  roop_Real: 0



# data augmentation
use_data_augmentation: true  # enable/disable data augmentation
data_aug:
  flip_prob: 0.5
  rotate_prob: 0.5
  rotate_limit: [-10, 10]
  blur_prob: 0.5
  blur_limit: [3, 7]
  brightness_prob: 0.5
  brightness_limit: [-0.1, 0.1]
  contrast_limit: [-0.1, 0.1]
  quality_lower: 40
  quality_upper: 100
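# A minimal sketch of how this data_aug block could map onto albumentations
# transforms (an assumption about the augmentation backend, not a quote of
# the DeepfakeBench pipeline):
#   import albumentations as A
#   aug = A.Compose([
#       A.HorizontalFlip(p=0.5),
#       A.Rotate(limit=(-10, 10), p=0.5),
#       A.GaussianBlur(blur_limit=(3, 7), p=0.5),
#       A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
#                                  contrast_limit=(-0.1, 0.1), p=0.5),
#       A.ImageCompression(quality_lower=40, quality_upper=100, p=0.5),
#   ])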

# mean and std for normalization
mean: [0.5, 0.5, 0.5]
std: [0.5, 0.5, 0.5]
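# With mean = std = 0.5 per channel, normalization maps a [0, 1] input to
# [-1, 1]: x_norm = (x - 0.5) / 0.5, e.g. a pixel value of 0.75 becomes 0.5.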

# optimizer config
optimizer:
  # choose between 'adam' and 'sgd'
  type: adam
  adam:
    lr: 0.0002  # learning rate
    beta1: 0.9  # beta1 for Adam optimizer
    beta2: 0.999 # beta2 for Adam optimizer
    eps: 0.00000001  # epsilon for Adam optimizer
    weight_decay: 0.0005  # weight decay for regularization
    amsgrad: false
  sgd:
    lr: 0.0002  # learning rate
    momentum: 0.9  # momentum for SGD optimizer
    weight_decay: 0.0005  # weight decay for regularization
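# Sketch of how the adam sub-config could be handed to PyTorch (assumed
# `model` variable; shown for illustration only):
#   import torch
#   optimizer = torch.optim.Adam(model.parameters(), lr=2e-4, betas=(0.9, 0.999),
#                                eps=1e-8, weight_decay=5e-4, amsgrad=False)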

# training config
lr_scheduler: null   # learning rate scheduler
nEpochs: 5   # number of epochs to train for
start_epoch: 0   # manual epoch number (useful for restarts)
save_epoch: 1   # interval epochs for saving models
rec_iter: 100   # interval iterations for recording
logdir: ./logs   # folder to output images and logs
manualSeed: 1024   # manual seed for random number generation

# loss function
loss_func:
  cls_loss: cross_entropy   # loss function to use
  spe_loss: cross_entropy
  con_loss: contrastive_regularization
  rec_loss: l1loss
losstype: null
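# Hedged reading of the four terms, following the UCF paper's design:
# cls_loss supervises binary real/fake prediction from the common features,
# spe_loss supervises the forgery-specific head with the fine-grained FF++
# labels above, con_loss is a contrastive regularization on the feature
# space, and rec_loss is an L1 reconstruction loss on the decoded images;
# the overall objective is presumably a weighted sum of these terms.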

# metric
metric_scoring: auc   # metric for evaluation (auc, acc, eer, ap)

# cuda
cuda: true   # whether to use CUDA acceleration
cudnn: true   # whether to use CuDNN for convolution operations
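# These flags typically translate to standard PyTorch switches (an assumed
# mapping, not verbatim from the training script):
#   import torch
#   if config['cuda']:                      # hypothetical parsed-config dict
#       model = model.cuda()
#   torch.backends.cudnn.enabled = True     # cudnn: true
#   torch.backends.cudnn.benchmark = True   # optional speed-up for fixed input sizes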